summaryrefslogtreecommitdiffstats
path: root/src/VBox/VMM/testcase
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-27 14:19:18 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-27 14:19:18 +0000
commit4035b1bfb1e5843a539a8b624d21952b756974d1 (patch)
treef1e9cd5bf548cbc57ff2fddfb2b4aa9ae95587e2 /src/VBox/VMM/testcase
parentInitial commit. (diff)
downloadvirtualbox-4035b1bfb1e5843a539a8b624d21952b756974d1.tar.xz
virtualbox-4035b1bfb1e5843a539a8b624d21952b756974d1.zip
Adding upstream version 6.1.22-dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/VBox/VMM/testcase')
-rwxr-xr-xsrc/VBox/VMM/testcase/Instructions/InstructionTestGen.py2239
-rw-r--r--src/VBox/VMM/testcase/Instructions/Makefile.kmk69
-rw-r--r--src/VBox/VMM/testcase/Instructions/env-bs2-r0-32-big.mac35
-rw-r--r--src/VBox/VMM/testcase/Instructions/env-bs2-r0-64-big.mac35
-rw-r--r--src/VBox/VMM/testcase/Instructions/env-bs2-r0-64.mac35
-rw-r--r--src/VBox/VMM/testcase/Instructions/env-bs2-r0-big.mac57
-rw-r--r--src/VBox/VMM/testcase/Instructions/env-bs2-r0-common.mac115
-rw-r--r--src/VBox/VMM/testcase/Instructions/env-bs2-r0.mac53
-rw-r--r--src/VBox/VMM/testcase/Instructions/env-common.mac346
-rw-r--r--src/VBox/VMM/testcase/Instructions/env-iprt-r3-32.mac19
-rw-r--r--src/VBox/VMM/testcase/Instructions/env-iprt-r3-64.mac19
-rw-r--r--src/VBox/VMM/testcase/Instructions/env-iprt-r3.mac99
-rw-r--r--src/VBox/VMM/testcase/Instructions/itgTableDaa.py1105
-rw-r--r--src/VBox/VMM/testcase/Instructions/itgTableDas.py1105
-rw-r--r--src/VBox/VMM/testcase/Instructions/tstVBInsTstR3.cpp120
-rw-r--r--src/VBox/VMM/testcase/Makefile.kmk653
-rw-r--r--src/VBox/VMM/testcase/NemRawBench-1.cpp1346
-rw-r--r--src/VBox/VMM/testcase/dev.tar.gzbin0 -> 732 bytes
-rwxr-xr-xsrc/VBox/VMM/testcase/mkdsk.sh76
-rw-r--r--src/VBox/VMM/testcase/tstAnimate.cpp943
-rw-r--r--src/VBox/VMM/testcase/tstAsmStructs.cpp54
-rw-r--r--src/VBox/VMM/testcase/tstAsmStructsAsm-lst.sed105
-rw-r--r--src/VBox/VMM/testcase/tstAsmStructsAsm.asm39
-rw-r--r--src/VBox/VMM/testcase/tstCFGM.cpp171
-rw-r--r--src/VBox/VMM/testcase/tstCompressionBenchmark.cpp642
-rw-r--r--src/VBox/VMM/testcase/tstGlobalConfig.cpp138
-rw-r--r--src/VBox/VMM/testcase/tstHelp.h169
-rw-r--r--src/VBox/VMM/testcase/tstIEMCheckMc.cpp769
-rw-r--r--src/VBox/VMM/testcase/tstMMHyperHeap.cpp284
-rw-r--r--src/VBox/VMM/testcase/tstMicro.h146
-rw-r--r--src/VBox/VMM/testcase/tstMicro.mac40
-rw-r--r--src/VBox/VMM/testcase/tstMicroRC.cpp258
-rw-r--r--src/VBox/VMM/testcase/tstMicroRC.def28
-rw-r--r--src/VBox/VMM/testcase/tstMicroRCA.asm558
-rw-r--r--src/VBox/VMM/testcase/tstPDMAsyncCompletion.cpp274
-rw-r--r--src/VBox/VMM/testcase/tstPDMAsyncCompletionStress.cpp648
-rw-r--r--src/VBox/VMM/testcase/tstSSM-2.cpp86
-rw-r--r--src/VBox/VMM/testcase/tstSSM.cpp935
-rw-r--r--src/VBox/VMM/testcase/tstVMM-HM.cpp121
-rw-r--r--src/VBox/VMM/testcase/tstVMMFork.cpp170
-rw-r--r--src/VBox/VMM/testcase/tstVMMR0CallHost-1.cpp181
-rw-r--r--src/VBox/VMM/testcase/tstVMREQ.cpp344
-rw-r--r--src/VBox/VMM/testcase/tstVMStruct.h1494
-rw-r--r--src/VBox/VMM/testcase/tstVMStructDTrace.cpp144
-rw-r--r--src/VBox/VMM/testcase/tstVMStructRC.cpp99
-rw-r--r--src/VBox/VMM/testcase/tstVMStructSize.cpp464
-rw-r--r--src/VBox/VMM/testcase/tstX86-1.cpp270
-rw-r--r--src/VBox/VMM/testcase/tstX86-1A.asm3443
-rw-r--r--src/VBox/VMM/testcase/tstX86-FpuSaveRestore.cpp116
-rw-r--r--src/VBox/VMM/testcase/tstX86-FpuSaveRestoreA.asm117
50 files changed, 20776 insertions, 0 deletions
diff --git a/src/VBox/VMM/testcase/Instructions/InstructionTestGen.py b/src/VBox/VMM/testcase/Instructions/InstructionTestGen.py
new file mode 100755
index 00000000..16c57a67
--- /dev/null
+++ b/src/VBox/VMM/testcase/Instructions/InstructionTestGen.py
@@ -0,0 +1,2239 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# $Id: InstructionTestGen.py $
+
+"""
+Instruction Test Generator.
+"""
+
+from __future__ import print_function;
+
+__copyright__ = \
+"""
+Copyright (C) 2012-2020 Oracle Corporation
+
+This file is part of VirtualBox Open Source Edition (OSE), as
+available from http://www.virtualbox.org. This file is free software;
+you can redistribute it and/or modify it under the terms of the GNU
+General Public License (GPL) as published by the Free Software
+Foundation, in version 2 as it comes in the "COPYING" file of the
+VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+"""
+__version__ = "$Revision: 135976 $";
+
+
+# pylint: disable=C0103,R0913
+
+
+# Standard python imports.
import binascii;
import io;
import os;
import random;
import sys;
from optparse import OptionParser
+
+
## @name Exit codes
## @{
RTEXITCODE_SUCCESS = 0;  # Test generation completed successfully.
RTEXITCODE_SYNTAX = 2;  # Invalid command line (or internal) syntax.
## @}

## @name Various C macros we're used to.
## @{
UINT8_MAX = 0xff  # Largest unsigned 8-bit value.
UINT16_MAX = 0xffff  # Largest unsigned 16-bit value.
UINT32_MAX = 0xffffffff  # Largest unsigned 32-bit value.
UINT64_MAX = 0xffffffffffffffff  # Largest unsigned 64-bit value.
def RT_BIT_32(iBit): # pylint: disable=C0103
    """ Returns a 32-bit value with only bit iBit set. """
    uMask = 1 << iBit;
    return uMask;
def RT_BIT_64(iBit): # pylint: disable=C0103
    """ Returns a 64-bit value with only bit iBit set. """
    uMask = 1 << iBit;
    return uMask;
+## @}
+
+
## @name ModR/M
## @{
X86_MODRM_RM_MASK = 0x07;  # R/M field (bits 0-2).
X86_MODRM_REG_MASK = 0x38;  # Register/opcode field mask (in place).
X86_MODRM_REG_SMASK = 0x07;  # Register/opcode field mask (shifted down).
X86_MODRM_REG_SHIFT = 3;  # Shift count of the register/opcode field.
X86_MODRM_MOD_MASK = 0xc0;  # Mode field mask (in place).
X86_MODRM_MOD_SMASK = 0x03;  # Mode field mask (shifted down).
X86_MODRM_MOD_SHIFT = 6;  # Shift count of the mode field.
## @}

## @name SIB
## @{
X86_SIB_BASE_MASK = 0x07;  # Base register field (bits 0-2).
X86_SIB_INDEX_MASK = 0x38;  # Index register field mask (in place).
X86_SIB_INDEX_SMASK = 0x07;  # Index register field mask (shifted down).
X86_SIB_INDEX_SHIFT = 3;  # Shift count of the index field.
X86_SIB_SCALE_MASK = 0xc0;  # Scale field mask (in place).
X86_SIB_SCALE_SMASK = 0x03;  # Scale field mask (shifted down).
X86_SIB_SCALE_SHIFT = 6;  # Shift count of the scale field.
## @}
+
## @name Prefixes
## @{
X86_OP_PRF_CS = 0x2e;  # CS segment override.
X86_OP_PRF_SS = 0x36;  # SS segment override.
X86_OP_PRF_DS = 0x3e;  # DS segment override.
X86_OP_PRF_ES = 0x26;  # ES segment override.
X86_OP_PRF_FS = 0x64;  # FS segment override.
X86_OP_PRF_GS = 0x65;  # GS segment override.
X86_OP_PRF_SIZE_OP = 0x66;  # Operand size override.
X86_OP_PRF_SIZE_ADDR = 0x67;  # Address size override.
X86_OP_PRF_LOCK = 0xf0;  # LOCK prefix.
X86_OP_PRF_REPNZ = 0xf2;  # REPNZ/REPNE prefix.
X86_OP_PRF_REPZ = 0xf3;  # REPZ/REPE prefix.
X86_OP_REX_B = 0x41;  # REX.B - extends the ModR/M r/m, SIB base or opcode reg field.
X86_OP_REX_X = 0x42;  # REX.X - extends the SIB index field.
X86_OP_REX_R = 0x44;  # REX.R - extends the ModR/M reg field.
X86_OP_REX_W = 0x48;  # REX.W - 64-bit operand size.
## @}
+
+
## @name General registers
## @{
# Register indexes as encoded in ModR/M and SIB fields; x8-x15 need a REX prefix.
X86_GREG_xAX = 0
X86_GREG_xCX = 1
X86_GREG_xDX = 2
X86_GREG_xBX = 3
X86_GREG_xSP = 4  # Stack pointer - deliberately excluded from the tests below.
X86_GREG_xBP = 5
X86_GREG_xSI = 6
X86_GREG_xDI = 7
X86_GREG_x8 = 8
X86_GREG_x9 = 9
X86_GREG_x10 = 10
X86_GREG_x11 = 11
X86_GREG_x12 = 12
X86_GREG_x13 = 13
X86_GREG_x14 = 14
X86_GREG_x15 = 15
## @}
+
+
## @name Register names.
## @{
# Note! The *NoSp tables hold None at index 4 (xSP) so the stack pointer can
#       never be picked.  g_asGRegs8Rex appends AH, CH, DH and BH at indexes
#       16-19; those are only encodable without a REX prefix.
g_asGRegs64NoSp = ('rax', 'rcx', 'rdx', 'rbx', None, 'rbp', 'rsi', 'rdi', 'r8', 'r9', 'r10', 'r11', 'r12', 'r13', 'r14', 'r15');
g_asGRegs64 = ('rax', 'rcx', 'rdx', 'rbx', 'rsp', 'rbp', 'rsi', 'rdi', 'r8', 'r9', 'r10', 'r11', 'r12', 'r13', 'r14', 'r15');
g_asGRegs32NoSp = ('eax', 'ecx', 'edx', 'ebx', None, 'ebp', 'esi', 'edi',
'r8d', 'r9d', 'r10d', 'r11d', 'r12d', 'r13d', 'r14d', 'r15d');
g_asGRegs32 = ('eax', 'ecx', 'edx', 'ebx', 'esp', 'ebp', 'esi', 'edi',
'r8d', 'r9d', 'r10d', 'r11d', 'r12d', 'r13d', 'r14d', 'r15d');
g_asGRegs16NoSp = ('ax', 'cx', 'dx', 'bx', None, 'bp', 'si', 'di',
'r8w', 'r9w', 'r10w', 'r11w', 'r12w', 'r13w', 'r14w', 'r15w');
g_asGRegs16 = ('ax', 'cx', 'dx', 'bx', 'sp', 'bp', 'si', 'di',
'r8w', 'r9w', 'r10w', 'r11w', 'r12w', 'r13w', 'r14w', 'r15w');
g_asGRegs8 = ('al', 'cl', 'dl', 'bl', 'ah', 'ch', 'dh', 'bh');
g_asGRegs8Rex = ('al', 'cl', 'dl', 'bl', 'spl', 'bpl', 'sil', 'dil',
'r8b', 'r9b', 'r10b', 'r11b', 'r12b', 'r13b', 'r14b', 'r15b',
'ah', 'ch', 'dh', 'bh');
## @}
+
## @name EFLAGS/RFLAGS
## @{
X86_EFL_CF = RT_BIT_32(0);  # Carry flag.
X86_EFL_CF_BIT = 0;
X86_EFL_1 = RT_BIT_32(1);  # Reserved bit, always set.
X86_EFL_PF = RT_BIT_32(2);  # Parity flag.
X86_EFL_AF = RT_BIT_32(4);  # Auxiliary carry flag.
X86_EFL_AF_BIT = 4;
X86_EFL_ZF = RT_BIT_32(6);  # Zero flag.
X86_EFL_ZF_BIT = 6;
X86_EFL_SF = RT_BIT_32(7);  # Sign flag.
X86_EFL_SF_BIT = 7;
X86_EFL_TF = RT_BIT_32(8);  # Trap flag.
X86_EFL_IF = RT_BIT_32(9);  # Interrupt enable flag.
X86_EFL_DF = RT_BIT_32(10);  # Direction flag.
X86_EFL_OF = RT_BIT_32(11);  # Overflow flag.
X86_EFL_OF_BIT = 11;
X86_EFL_IOPL = (RT_BIT_32(12) | RT_BIT_32(13));  # I/O privilege level.
X86_EFL_NT = RT_BIT_32(14);  # Nested task flag.
X86_EFL_RF = RT_BIT_32(16);  # Resume flag.
X86_EFL_VM = RT_BIT_32(17);  # Virtual 8086 mode.
X86_EFL_AC = RT_BIT_32(18);  # Alignment check.
X86_EFL_VIF = RT_BIT_32(19);  # Virtual interrupt flag.
X86_EFL_VIP = RT_BIT_32(20);  # Virtual interrupt pending.
X86_EFL_ID = RT_BIT_32(21);  # CPUID identification flag.
X86_EFL_LIVE_MASK = 0x003f7fd5;  # Mask of the modifiable flag bits.
X86_EFL_RA1_MASK = RT_BIT_32(1);  # Reserved-always-one mask.
X86_EFL_IOPL_SHIFT = 12;  # Shift count of the IOPL field.
X86_EFL_STATUS_BITS = ( X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_ZF | X86_EFL_SF | X86_EFL_OF );  # Arithmetic status flags.
## @}
+
## @name Random
## @{

# Seed the PRNG from four bytes of OS entropy so that a failing run can be
# reproduced by hard-coding the reported seed value below.
# Note! The previous code used (os.urandom(4)).encode('hex'), which only
#       works on Python 2 (Python 3 bytes has no encode method);
#       binascii.hexlify works on both Python 2 and 3.
g_iMyRandSeed = int(binascii.hexlify(os.urandom(4)), 16);
#g_iMyRandSeed = 286523426;
#g_iMyRandSeed = 1994382324;
g_oMyRand = random.Random(g_iMyRandSeed);
#g_oMyRand = random.SystemRandom();

def randU8():
    """ Unsigned 8-bit random number. """
    return g_oMyRand.getrandbits(8);

def randU16():
    """ Unsigned 16-bit random number. """
    return g_oMyRand.getrandbits(16);

def randU32():
    """ Unsigned 32-bit random number. """
    return g_oMyRand.getrandbits(32);

def randU64():
    """ Unsigned 64-bit random number. """
    return g_oMyRand.getrandbits(64);

def randUxx(cBits):
    """ Unsigned 8-, 16-, 32-, or 64-bit random number. """
    return g_oMyRand.getrandbits(cBits);

def randSxx(cBits):
    """
    Signed 8-, 16-, 32-, or 64-bit random number.

    Note! Maps the raw bits sign-magnitude style rather than two's
          complement (e.g. for cBits=8, 0x80 yields 0 and 0xff yields -127),
          so the result is always in the range ]-2^(cBits-1), 2^(cBits-1)[.
    """
    uVal = randUxx(cBits);
    iRet = uVal & ((1 << (cBits - 1)) - 1);
    if iRet != uVal:
        iRet = -iRet;
    return iRet;

def randUxxList(cBits, cElements):
    """ List of cElements unsigned cBits-wide random numbers. """
    return [randUxx(cBits) for _ in range(cElements)];
## @}
+
+
+
+
+## @name Instruction Emitter Helpers
+## @{
+
def calcRexPrefixForTwoModRmRegs(iReg, iRm, bOtherRexPrefixes = 0):
    """
    Calculates the REX prefix byte needed for the given reg and r/m register
    indexes, merged with any other REX bits the caller already needs.
    Returns a one-byte list, or an empty list when no prefix is required.
    """
    bRex = bOtherRexPrefixes \
         | (X86_OP_REX_R if iReg >= 8 else 0) \
         | (X86_OP_REX_B if iRm >= 8 else 0);
    return [bRex,] if bRex != 0 else [];
+
def calcModRmForTwoRegs(iReg, iRm):
    """
    Calculate the ModR/M byte for two registers (mod=3, register direct).
    Returns an array with one byte in it.
    """
    bMod = 0x3 << X86_MODRM_MOD_SHIFT;                      # Register direct form.
    bReg = (iReg << X86_MODRM_REG_SHIFT) & X86_MODRM_REG_MASK;
    bLow = iRm & X86_MODRM_RM_MASK;
    return [bMod | bReg | bLow,];
+
+## @}
+
+
+## @name Misc
+## @{
+
def convU32ToSigned(u32):
    """ Converts a 32-bit unsigned value to its 32-bit signed (two's complement) equivalent. """
    if u32 >= 0x80000000:
        return u32 - 0x100000000;
    return u32;
+
def rotateLeftUxx(cBits, uVal, cShift):
    """ Rotate a cBits-wide (8/16/32/64) unsigned number left by cShift bits. """
    assert cShift < cBits;
    assert cBits in (8, 16, 32, 64);
    uMask = (1 << cBits) - 1;
    uVal &= uMask;
    # Bits shifted out at the top re-enter at the bottom.
    return ((uVal << cShift) | (uVal >> (cBits - cShift))) & uMask;
+
def rotateRightUxx(cBits, uVal, cShift):
    """ Rotate a cBits-wide (8/16/32/64) unsigned number right by cShift bits. """
    assert cShift < cBits;
    assert cBits in (8, 16, 32, 64);
    uMask = (1 << cBits) - 1;
    uVal &= uMask;
    # Bits shifted out at the bottom re-enter at the top.
    return ((uVal >> cShift) | (uVal << (cBits - cShift))) & uMask;
+
def gregName(iReg, cBits, fRexByteRegs = True):
    """
    Gets the name of a general register by index and width (in bits).
    fRexByteRegs selects the REX byte-register table (SPL..DIL, R8B..) over
    the classic one (AH..BH) for 8-bit widths.
    """
    if cBits == 8:
        return g_asGRegs8Rex[iReg] if fRexByteRegs else g_asGRegs8[iReg];
    dNamesByWidth = { 64: g_asGRegs64, 32: g_asGRegs32, 16: g_asGRegs16, };
    assert cBits in dNamesByWidth;
    return dNamesByWidth[cBits][iReg];
+
+## @}
+
+
class TargetEnv(object):
    """
    Target Runtime Environment.

    Describes the environment the generated test code runs in: instruction
    set width, CPU mode and privilege ring.  Also provides register helpers
    that depend on the target bit count.
    """

    ## @name CPU Modes
    ## @{
    ksCpuMode_Real = 'real';
    ksCpuMode_Protect = 'prot';
    ksCpuMode_Paged = 'paged';
    ksCpuMode_Long = 'long';
    ksCpuMode_V86 = 'v86';
    ## @}

    ## @name Instruction set.
    ## @{
    ksInstrSet_16 = '16';
    ksInstrSet_32 = '32';
    ksInstrSet_64 = '64';
    ## @}

    def __init__(self, sName,
                 sInstrSet = ksInstrSet_32,
                 sCpuMode = ksCpuMode_Paged,
                 iRing = 3,
                 ):
        """
        sName     - Environment name; an 'iprt' prefix marks IPRT (ring-3) targets.
        sInstrSet - One of the ksInstrSet_XX constants.
        sCpuMode  - One of the ksCpuMode_XXX constants.
        iRing     - The privilege ring (0 or 3).
        """
        self.sName = sName;
        self.sInstrSet = sInstrSet;
        self.sCpuMode = sCpuMode;
        self.iRing = iRing;
        # Pre-resolve the register name tables matching the target width.
        self.asGRegs = g_asGRegs64 if self.is64Bit() else g_asGRegs32;
        self.asGRegsNoSp = g_asGRegs64NoSp if self.is64Bit() else g_asGRegs32NoSp;

    def isUsingIprt(self):
        """ Whether it's an IPRT environment or not. """
        return self.sName.startswith('iprt');

    def is64Bit(self):
        """ Whether it's a 64-bit environment or not. """
        return self.sInstrSet == self.ksInstrSet_64;

    def getDefOpBits(self):
        """ Get the default operand size as a bit count. """
        if self.sInstrSet == self.ksInstrSet_16:
            return 16;
        return 32;

    def getDefOpBytes(self):
        """ Get the default operand size as a byte count. """
        # Use floor division so the result stays an integer on Python 3.
        return self.getDefOpBits() // 8;

    def getMaxOpBits(self):
        """ Get the max operand size as a bit count. """
        if self.sInstrSet == self.ksInstrSet_64:
            return 64;
        return 32;

    def getMaxOpBytes(self):
        """ Get the max operand size as a byte count. """
        return self.getMaxOpBits() // 8;

    def getDefAddrBits(self):
        """ Get the default address size as a bit count. """
        if self.sInstrSet == self.ksInstrSet_16:
            return 16;
        if self.sInstrSet == self.ksInstrSet_32:
            return 32;
        return 64;

    def getDefAddrBytes(self):
        """ Get the default address size as a byte count. """
        return self.getDefAddrBits() // 8;

    def getGRegCount(self, cbEffBytes = 4):
        """ Get the number of general registers usable with the given operand size. """
        if self.sInstrSet == self.ksInstrSet_64:
            if cbEffBytes == 1:
                return 16 + 4;      # The 16 byte registers plus AH, CH, DH and BH.
            return 16;
        return 8;

    def randGRegNoSp(self, cbEffBytes = 4):
        """ Returns a random general register number, excluding the SP register. """
        iReg = randU16() % self.getGRegCount(cbEffBytes);
        while iReg == X86_GREG_xSP:
            iReg = randU16() % self.getGRegCount(cbEffBytes);
        return iReg;

    def randGRegNoSpList(self, cItems, cbEffBytes = 4):
        """ List of cItems randGRegNoSp values. """
        aiRegs = [];
        for _ in range(cItems):
            aiRegs.append(self.randGRegNoSp(cbEffBytes));
        return aiRegs;

    def getAddrModes(self):
        """ Gets a list of addressing modes (16, 32, or/and 64), native one first. """
        if self.sInstrSet == self.ksInstrSet_16:
            return [16, 32];
        if self.sInstrSet == self.ksInstrSet_32:
            return [32, 16];
        return [64, 32];

    def is8BitHighGReg(self, cbEffOp, iGReg):
        """ Checks if the given register is a high 8-bit general register (AH, CH, DH or BH). """
        assert cbEffOp in [1, 2, 4, 8];
        if cbEffOp == 1:
            if iGReg >= 16:     # Indexes 16-19 in g_asGRegs8Rex are AH..BH.
                return True;
            if iGReg >= 4 and not self.is64Bit():
                return True;
        return False;

    def gregNameBits(self, iReg, cBits):
        """ Gets the name of the given register for the specified width (bits). """
        return gregName(iReg, cBits, self.is64Bit());

    def gregNameBytes(self, iReg, cbWidth):
        """ Gets the name of the given register for the specified width (in bytes). """
        return gregName(iReg, cbWidth * 8, self.is64Bit());
+
+
+
## Target environments, keyed by environment name.
g_dTargetEnvs = {
'iprt-r3-32': TargetEnv('iprt-r3-32', TargetEnv.ksInstrSet_32, TargetEnv.ksCpuMode_Protect, 3),
'iprt-r3-64': TargetEnv('iprt-r3-64', TargetEnv.ksInstrSet_64, TargetEnv.ksCpuMode_Long, 3),
'bs2-r0-64': TargetEnv('bs2-r0-64', TargetEnv.ksInstrSet_64, TargetEnv.ksCpuMode_Long, 0),
'bs2-r0-64-big': TargetEnv('bs2-r0-64-big', TargetEnv.ksInstrSet_64, TargetEnv.ksCpuMode_Long, 0),
'bs2-r0-32-big': TargetEnv('bs2-r0-32-big', TargetEnv.ksInstrSet_32, TargetEnv.ksCpuMode_Protect, 0),
};
+
+
class InstrTestBase(object):
    """
    Base class for testing one instruction.
    """

    def __init__(self, sName, sInstr = None):
        """
        sName  - The test name; its first word doubles as the instruction
                 mnemonic when sInstr is omitted.
        sInstr - Optional explicit instruction mnemonic.
        """
        self.sName = sName;
        self.sInstr = sInstr if sInstr else sName.split()[0];

    def isApplicable(self, oGen):
        """
        Tests if the instruction test is applicable to the selected environment.
        The base class accepts all environments.
        """
        _ = oGen;
        return True;

    def generateTest(self, oGen, sTestFnName):
        """
        Emits the test assembly code.  Must be overridden; the base class only
        emits a placeholder comment.
        """
        oGen.write(';; @todo not implemented. This is for the linter: %s, %s\n' % (oGen, sTestFnName));
        return True;

    def generateInputs(self, cbEffOp, cbMaxOp, oGen, fLong = False):
        """
        Generate a list of input test values.

        cbEffOp - Effective operand size in bytes (used when padding the list).
        cbMaxOp - Maximum operand size in bytes (limits the value widths).
        fLong   - Produce the thorough list with boundary values when True,
                  otherwise a short list of random values.

        Note! Floor division (//) is used throughout so the values stay
              integers on Python 3; true division would yield floats that
              break the '%x' formatting of the test data later on.
        """
        if fLong:
            #
            # Try to do extremes as well as different ranges of random numbers.
            #
            auRet = [0, 1, ];
            if cbMaxOp >= 1:
                auRet += [ UINT8_MAX // 2, UINT8_MAX // 2 + 1, UINT8_MAX ];
            if cbMaxOp >= 2:
                auRet += [ UINT16_MAX // 2, UINT16_MAX // 2 + 1, UINT16_MAX ];
            if cbMaxOp >= 4:
                auRet += [ UINT32_MAX // 2, UINT32_MAX // 2 + 1, UINT32_MAX ];
            if cbMaxOp >= 8:
                auRet += [ UINT64_MAX // 2, UINT64_MAX // 2 + 1, UINT64_MAX ];

            # Top the list up with random values until the count configured
            # for the test size is reached.
            if oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny:
                for cBits, cValues in ( (8, 4), (16, 4), (32, 8), (64, 8) ):
                    if cBits < cbMaxOp * 8:
                        auRet += randUxxList(cBits, cValues);
                cWanted = 16;
            elif oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Medium:
                for cBits, cValues in ( (8, 8), (16, 8), (24, 2), (32, 16), (40, 1), (48, 1), (56, 1), (64, 16) ):
                    if cBits < cbMaxOp * 8:
                        auRet += randUxxList(cBits, cValues);
                cWanted = 64;
            else:
                for cBits, cValues in ( (8, 16), (16, 16), (24, 4), (32, 64), (40, 4), (48, 4), (56, 4), (64, 64) ):
                    if cBits < cbMaxOp * 8:
                        auRet += randUxxList(cBits, cValues);
                cWanted = 168;
            if len(auRet) < cWanted:
                auRet += randUxxList(cbEffOp * 8, cWanted - len(auRet));
        else:
            #
            # Short list, just do some random numbers.
            #
            # Note! randUxxList takes a bit count; the old code passed the
            #       byte count (cbMaxOp) here, producing 4-8 bit values only.
            auRet = [];
            if oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny:
                auRet += randUxxList(cbMaxOp * 8, 1);
            elif oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Medium:
                auRet += randUxxList(cbMaxOp * 8, 2);
            else:
                auRet = [];
                for cBits in (8, 16, 32, 64):
                    if cBits < cbMaxOp * 8:
                        auRet += randUxxList(cBits, 1);
        return auRet;
+
+
+class InstrTest_MemOrGreg_2_Greg(InstrTestBase):
+ """
+ Instruction reading memory or general register and writing the result to a
+ general register.
+ """
+
+ def __init__(self, sName, fnCalcResult, sInstr = None, acbOpVars = None):
+ InstrTestBase.__init__(self, sName, sInstr);
+ self.fnCalcResult = fnCalcResult;
+ self.acbOpVars = [ 1, 2, 4, 8 ] if not acbOpVars else list(acbOpVars);
+ self.fTestRegForm = True;
+ self.fTestMemForm = True;
+
+ ## @name Test Instruction Writers
+ ## @{
+
+ def writeInstrGregGreg(self, cbEffOp, iOp1, iOp2, oGen):
+ """ Writes the instruction with two general registers as operands. """
+ oGen.write(' %s %s, %s\n'
+ % ( self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp), oGen.gregNameBytes(iOp2, cbEffOp),));
+ return True;
+
    def writeInstrGregPureRM(self, cbEffOp, iOp1, cAddrBits, iOp2, iMod, offDisp, oGen):
        """
        Writes the instruction with a general register destination and a pure
        R/M (memory) source - no SIB byte involved.

        iOp2 is the R/M field value; iMod the ModR/M mod field (0 = no disp,
        1 = disp8, 2 = disp32); offDisp the displacement or None for mod 0.
        """
        oGen.write(' ');
        if iOp2 == 13 and iMod == 0 and cAddrBits == 64:
            oGen.write('altrexb '); # Alternative encoding for rip relative addressing.
        oGen.write('%s %s, [' % (self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp),));
        if (iOp2 == 5 or iOp2 == 13) and iMod == 0:
            # R/M 5/13 with mod 0 selects disp32 (RIP relative in 64-bit mode),
            # so address the global data label directly.
            oGen.write('VBINSTST_NAME(g_u%sData)' % (cbEffOp * 8,))
            if oGen.oTarget.is64Bit():
                oGen.write(' wrt rip');
        else:
            if iMod == 1:
                oGen.write('byte %d + ' % (offDisp,));
            elif iMod == 2:
                oGen.write('dword %d + ' % (offDisp,));
            else:
                assert iMod == 0;

            if cAddrBits == 64:
                oGen.write(g_asGRegs64[iOp2]);
            elif cAddrBits == 32:
                oGen.write(g_asGRegs32[iOp2]);
            elif cAddrBits == 16:
                assert False; ## @todo implement 16-bit addressing.
            else:
                assert False, str(cAddrBits);

        oGen.write(']\n');
        return True;
+
    def writeInstrGregSibLabel(self, cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen):
        """ Writes the instruction taking a register and a label (base only w/o reg), SIB form. """
        # Only reachable for the disp32-only SIB encoding: mod 0, base 5/13,
        # index 'none' (4); we point it at the global data label.
        assert offDisp is None; assert iBaseReg in [5, 13]; assert iIndexReg == 4; assert cAddrBits != 16;
        if cAddrBits == 64:
            # Note! Cannot test this in 64-bit mode in any sensible way because the disp is 32-bit
            # and we cannot (yet) make assumptions about where we're loaded.
            ## @todo Enable testing this in environments where we can make assumptions (boot sector).
            oGen.write(' %s %s, [VBINSTST_NAME(g_u%sData) xWrtRIP]\n'
                       % ( self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp), cbEffOp * 8,));
        else:
            # altsibxN forces the assembler to emit the SIB form with the given scale.
            oGen.write(' altsibx%u %s %s, [VBINSTST_NAME(g_u%sData) xWrtRIP] ; iOp1=%s cbEffOp=%s\n'
                       % ( iScale, self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp), cbEffOp * 8, iOp1, cbEffOp));
        return True;
+
    def writeInstrGregSibScaledReg(self, cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen):
        """ Writes the instruction taking a register and disp+scaled register (no base reg), SIB form. """
        assert iBaseReg in [5, 13]; assert iIndexReg != 4; assert cAddrBits != 16;
        # Note! Using altsibxN to force scaled encoding. This is only really a
        # necessity for iScale=1, but doesn't hurt for the rest.
        oGen.write(' altsibx%u %s %s, [%s * %#x'
                   % (iScale, self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp), oGen.gregNameBits(iIndexReg, cAddrBits), iScale,));
        if offDisp is not None:
            oGen.write(' + %#x' % (offDisp,));
        oGen.write(']\n');
        _ = iBaseReg;   # Unused: base 5/13 + mod 0 means 'no base register'.
        return True;
+
+ def writeInstrGregSibBase(self, cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen):
+ """ Writes the instruction taking a register and base only (with reg), SIB form. """
+ oGen.write(' altsibx%u %s %s, [%s'
+ % (iScale, self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp), oGen.gregNameBits(iBaseReg, cAddrBits),));
+ if offDisp is not None:
+ oGen.write(' + %#x' % (offDisp,));
+ oGen.write(']\n');
+ _ = iIndexReg;
+ return True;
+
    def writeInstrGregSibBaseAndScaledReg(self, cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen):
        """ Writes the instruction taking a register and a full featured SIB form address. """
        # Note! From the looks of things, yasm will encode the following instructions the same way:
        # mov eax, [rsi*1 + rbx]
        # mov eax, [rbx + rsi*1]
        # So, when there are two registers involved, the '*1' selects
        # which is index and which is base.
        oGen.write(' %s %s, [%s + %s * %u'
                   % ( self.sInstr, oGen.gregNameBytes(iOp1, cbEffOp),
                       oGen.gregNameBits(iBaseReg, cAddrBits), oGen.gregNameBits(iIndexReg, cAddrBits), iScale,));
        if offDisp is not None:
            oGen.write(' + %#x' % (offDisp,));
        oGen.write(']\n');
        return True;
+
+ ## @}
+
+
+ ## @name Memory setups
+ ## @{
+
+ def generateMemSetupReadByLabel(self, oGen, cbEffOp, uInput):
+ """ Sets up memory for a memory read. """
+ oGen.pushConst(uInput);
+ oGen.write(' call VBINSTST_NAME(Common_SetupMemReadU%u)\n' % (cbEffOp*8,));
+ return True;
+
+ def generateMemSetupReadByReg(self, oGen, cAddrBits, cbEffOp, iReg1, uInput, offDisp = None):
+ """ Sets up memory for a memory read indirectly addressed thru one register and optional displacement. """
+ oGen.pushConst(uInput);
+ oGen.write(' call VBINSTST_NAME(%s)\n'
+ % (oGen.needGRegMemSetup(cAddrBits, cbEffOp, iBaseReg = iReg1, offDisp = offDisp),));
+ oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iReg1],));
+ return True;
+
+ def generateMemSetupReadByScaledReg(self, oGen, cAddrBits, cbEffOp, iIndexReg, iScale, uInput, offDisp = None):
+ """ Sets up memory for a memory read indirectly addressed thru one register and optional displacement. """
+ oGen.pushConst(uInput);
+ oGen.write(' call VBINSTST_NAME(%s)\n'
+ % (oGen.needGRegMemSetup(cAddrBits, cbEffOp, offDisp = offDisp, iIndexReg = iIndexReg, iScale = iScale),));
+ oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iIndexReg],));
+ return True;
+
    def generateMemSetupReadByBaseAndScaledReg(self, oGen, cAddrBits, cbEffOp, iBaseReg, iIndexReg, iScale, uInput, offDisp):
        """ Sets up memory for a memory read indirectly addressed thru two registers with optional displacement. """
        # The input value is passed to the setup helper on the stack.
        oGen.pushConst(uInput);
        oGen.write(' call VBINSTST_NAME(%s)\n'
                   % (oGen.needGRegMemSetup(cAddrBits, cbEffOp, iBaseReg = iBaseReg, offDisp = offDisp,
                                            iIndexReg = iIndexReg, iScale = iScale),));
        # Save both address registers; index first, base last (order matters
        # to the checker generated later).
        oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iIndexReg],));
        oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iBaseReg],));
        return True;
+
    def generateMemSetupPureRM(self, oGen, cAddrBits, cbEffOp, iOp2, iMod, uInput, offDisp = None):
        """ Sets up memory for a pure R/M addressed read, iOp2 being the R/M value. """
        oGen.pushConst(uInput);
        assert offDisp is None or iMod != 0;
        if (iOp2 != 5 and iOp2 != 13) or iMod != 0:
            # Register indirect: the helper loads the data address into the register.
            oGen.write(' call VBINSTST_NAME(%s)\n'
                       % (oGen.needGRegMemSetup(cAddrBits, cbEffOp, iOp2, offDisp),));
        else:
            # R/M 5/13 with mod 0 is the disp32/RIP-relative form addressing
            # the data label, so no register needs setting up.
            oGen.write(' call VBINSTST_NAME(Common_SetupMemReadU%u)\n' % (cbEffOp*8,));
        oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iOp2],));
        return True;
+
+ ## @}
+
    def generateOneStdTestGregGreg(self, oGen, cbEffOp, cbMaxOp, iOp1, iOp1X, iOp2, iOp2X, uInput, uResult):
        """
        Generate one standard instr greg,greg test.

        iOp1X/iOp2X are the indexes of the max-op-sized registers backing
        iOp1/iOp2 (these differ for the high 8-bit registers AH..BH).
        """
        oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
        oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[iOp2X], uInput,));
        if iOp1X != iOp2X:
            # Pass the source register value to the checker (only when the
            # two operands live in distinct registers).
            oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iOp2X],));
        self.writeInstrGregGreg(cbEffOp, iOp1, iOp2, oGen);
        oGen.pushConst(uResult);
        oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(iOp1X, iOp2X if iOp1X != iOp2X else None),));
        _ = cbMaxOp;
        return True;
+
    def generateOneStdTestGregGreg8BitHighPain(self, oGen, cbEffOp, cbMaxOp, iOp1, iOp2, uInput):
        """
        High 8-bit registers are a real pain!

        AH/CH/DH/BH alias bits 8-15 of the corresponding full-width register,
        so input and expected result have to be rotated by 8 bits to line up
        with the max-op-sized registers before handing over to the normal
        greg,greg worker.
        """
        assert oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1) or oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2);
        # Figure out the register indexes of the max op sized regs involved.
        # (AH..BH map onto xAX..xBX, hence the & 3.)
        iOp1X = iOp1 & 3;
        iOp2X = iOp2 & 3;
        oGen.write(' ; iOp1=%u iOp1X=%u iOp2=%u iOp2X=%u\n' % (iOp1, iOp1X, iOp2, iOp2X,));

        # Calculate unshifted result.
        if iOp1X != iOp2X:
            uCur = oGen.auRegValues[iOp1X];
            if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1):
                # Bring the high byte down so fnCalcResult sees the byte the
                # instruction actually operates on.
                uCur = rotateRightUxx(cbMaxOp * 8, uCur, 8);
        else:
            # Same backing register: the instruction reads the value just written.
            uCur = uInput;
            if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1) != oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2):
                if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1):
                    uCur = rotateRightUxx(cbMaxOp * 8, uCur, 8);
                else:
                    uCur = rotateLeftUxx(cbMaxOp * 8, uCur, 8);
        uResult = self.fnCalcResult(cbEffOp, uInput, uCur, oGen);


        # Rotate the input and/or result to match their max-op-sized registers.
        if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2):
            uInput = rotateLeftUxx(cbMaxOp * 8, uInput, 8);
        if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1):
            uResult = rotateLeftUxx(cbMaxOp * 8, uResult, 8);

        # Hand it over to an overridable worker method.
        return self.generateOneStdTestGregGreg(oGen, cbEffOp, cbMaxOp, iOp1, iOp1X, iOp2, iOp2X, uInput, uResult);
+
+
    def generateOneStdTestGregMemNoSib(self, oGen, cAddrBits, cbEffOp, cbMaxOp, iOp1, iOp2, uInput, uResult):
        """ Generate mode 0, 1 and 2 test for the R/M=iOp2. """
        if cAddrBits == 16:
            _ = cbMaxOp;    # 16-bit addressing not implemented yet (see writeInstrGregPureRM).
        else:
            iMod = 0; # No disp, except for i=5.
            oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
            self.generateMemSetupPureRM(oGen, cAddrBits, cbEffOp, iOp2, iMod, uInput);
            self.writeInstrGregPureRM(cbEffOp, iOp1, cAddrBits, iOp2, iMod, None, oGen);
            oGen.pushConst(uResult);
            oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(iOp1, iOp2),));

            # Note! The displacement variants (mod 1 and 2) are skipped for
            #       R/M 5 and 13.
            if iOp2 != 5 and iOp2 != 13:
                iMod = 1;   # disp8.
                for offDisp in oGen.getDispForMod(iMod):
                    oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
                    self.generateMemSetupPureRM(oGen, cAddrBits, cbEffOp, iOp2, iMod, uInput, offDisp);
                    self.writeInstrGregPureRM(cbEffOp, iOp1, cAddrBits, iOp2, iMod, offDisp, oGen);
                    oGen.pushConst(uResult);
                    oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(iOp1, iOp2),));

                iMod = 2;   # disp32.
                for offDisp in oGen.getDispForMod(iMod):
                    oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
                    self.generateMemSetupPureRM(oGen, cAddrBits, cbEffOp, iOp2, iMod, uInput, offDisp);
                    self.writeInstrGregPureRM(cbEffOp, iOp1, cAddrBits, iOp2, iMod, offDisp, oGen);
                    oGen.pushConst(uResult);
                    oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(iOp1, iOp2),));

        return True;
+
    def generateOneStdTestGregMemSib(self, oGen, cAddrBits, cbEffOp, cbMaxOp, iOp1, iMod, # pylint: disable=R0913
                                     iBaseReg, iIndexReg, iScale, uInput, uResult):
        """ Generate the tests for one SIB base/index/scale combination, iterating the displacements for iMod. """
        for offDisp in oGen.getDispForMod(iMod, cbEffOp):
            if ((iBaseReg == 5 or iBaseReg == 13) and iMod == 0):
                # Base 5/13 with mod 0 encodes 'no base register, disp32 only'.
                if iIndexReg == 4:
                    # Index 4 is 'no index register': label-only addressing.
                    if cAddrBits == 64:
                        continue; # skipping. (See writeInstrGregSibLabel.)
                    oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
                    self.generateMemSetupReadByLabel(oGen, cbEffOp, uInput);
                    self.writeInstrGregSibLabel(cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen);
                    sChecker = oGen.needGRegChecker(iOp1);
                else:
                    # Scaled index register without a base register.
                    oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
                    self.generateMemSetupReadByScaledReg(oGen, cAddrBits, cbEffOp, iIndexReg, iScale, uInput, offDisp);
                    self.writeInstrGregSibScaledReg(cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen);
                    sChecker = oGen.needGRegChecker(iOp1, iIndexReg);
            else:
                oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
                if iIndexReg == 4:
                    # Base register only, no index.
                    self.generateMemSetupReadByReg(oGen, cAddrBits, cbEffOp, iBaseReg, uInput, offDisp);
                    self.writeInstrGregSibBase(cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen);
                    sChecker = oGen.needGRegChecker(iOp1, iBaseReg);
                else:
                    if iIndexReg == iBaseReg and iScale == 1 and offDisp is not None and (offDisp & 1):
                        # base == index with scale 1: make the displacement even
                        # (presumably so the effective address stays suitably
                        # aligned - TODO confirm against the setup helpers).
                        if offDisp < 0: offDisp += 1;
                        else: offDisp -= 1;
                    self.generateMemSetupReadByBaseAndScaledReg(oGen, cAddrBits, cbEffOp, iBaseReg,
                                                                iIndexReg, iScale, uInput, offDisp);
                    self.writeInstrGregSibBaseAndScaledReg(cbEffOp, iOp1, cAddrBits, iBaseReg, iIndexReg, iScale, offDisp, oGen);
                    sChecker = oGen.needGRegChecker(iOp1, iBaseReg, iIndexReg);
            oGen.pushConst(uResult);
            oGen.write(' call VBINSTST_NAME(%s)\n' % (sChecker,));
        _ = cbMaxOp;
        return True;
+
+ def generateStdTestGregMemSib(self, oGen, cAddrBits, cbEffOp, cbMaxOp, iOp1, auInputs):
+ """ Generate all SIB variations for the given iOp1 (reg) value. """
+ assert cAddrBits in [32, 64];
+ i = oGen.cSibBasePerRun;
+ while i > 0:
+ oGen.iSibBaseReg = (oGen.iSibBaseReg + 1) % oGen.oTarget.getGRegCount(cAddrBits / 8);
+ if oGen.iSibBaseReg == X86_GREG_xSP: # no RSP testing atm.
+ continue;
+
+ j = oGen.getSibIndexPerRun();
+ while j > 0:
+ oGen.iSibIndexReg = (oGen.iSibIndexReg + 1) % oGen.oTarget.getGRegCount(cAddrBits / 8);
+ if oGen.iSibIndexReg == iOp1 and oGen.iSibIndexReg != 4 and cAddrBits != cbMaxOp:
+ continue; # Don't know the high bit of the address ending up the result - skip it for now.
+
+ for iMod in [0, 1, 2]:
+ if oGen.iSibBaseReg == iOp1 \
+ and ((oGen.iSibBaseReg != 5 and oGen.iSibBaseReg != 13) or iMod != 0) \
+ and cAddrBits != cbMaxOp:
+ continue; # Don't know the high bit of the address ending up the result - skip it for now.
+
+ for _ in oGen.oSibScaleRange:
+ oGen.iSibScale *= 2;
+ if oGen.iSibScale > 8:
+ oGen.iSibScale = 1;
+
+ for uInput in auInputs:
+ oGen.newSubTest();
+ uResult = self.fnCalcResult(cbEffOp, uInput, oGen.auRegValues[iOp1], oGen);
+ self.generateOneStdTestGregMemSib(oGen, cAddrBits, cbEffOp, cbMaxOp, iOp1, iMod,
+ oGen.iSibBaseReg, oGen.iSibIndexReg, oGen.iSibScale,
+ uInput, uResult);
+ j -= 1;
+ i -= 1;
+
+ return True;
+
+
    def generateStandardTests(self, oGen):
        """ Generate standard tests. """

        # Parameters.
        cbDefOp       = oGen.oTarget.getDefOpBytes();
        cbMaxOp       = oGen.oTarget.getMaxOpBytes();
        auShortInputs = self.generateInputs(cbDefOp, cbMaxOp, oGen);
        auLongInputs  = self.generateInputs(cbDefOp, cbMaxOp, oGen, fLong = True);
        # The long input list is only used for one randomly picked operand
        # combination to keep the amount of generated code under control.
        iLongOp1      = oGen.oTarget.randGRegNoSp();
        iLongOp2      = oGen.oTarget.randGRegNoSp();

        # Register tests
        if self.fTestRegForm:
            for cbEffOp in self.acbOpVars:
                if cbEffOp > cbMaxOp:
                    continue;
                oOp2Range = range(oGen.oTarget.getGRegCount(cbEffOp));
                if oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny:
                    oOp2Range = [iLongOp2,];
                oGen.write('; cbEffOp=%u\n' % (cbEffOp,));

                for iOp1 in range(oGen.oTarget.getGRegCount(cbEffOp)):
                    if iOp1 == X86_GREG_xSP:
                        continue; # Cannot test xSP atm.
                    for iOp2 in oOp2Range:
                        if (iOp2 >= 16 and iOp1 in range(4, 16)) \
                          or (iOp1 >= 16 and iOp2 in range(4, 16)):
                            continue; # Any REX encoding turns AH,CH,DH,BH regs into SPL,BPL,SIL,DIL.
                        if iOp2 == X86_GREG_xSP:
                            continue; # Cannot test xSP atm.

                        oGen.write('; iOp2=%u cbEffOp=%u\n' % (iOp2, cbEffOp));
                        for uInput in (auLongInputs if iOp1 == iLongOp1 and iOp2 == iLongOp2 else auShortInputs):
                            oGen.newSubTest();
                            if not oGen.oTarget.is8BitHighGReg(cbEffOp, iOp1) and not oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2):
                                # When iOp1 == iOp2 the source register already holds uInput.
                                uCur = oGen.auRegValues[iOp1 & 15] if iOp1 != iOp2 else uInput;
                                uResult = self.fnCalcResult(cbEffOp, uInput, uCur, oGen);
                                self.generateOneStdTestGregGreg(oGen, cbEffOp, cbMaxOp, iOp1, iOp1 & 15, iOp2, iOp2 & 15,
                                                                uInput, uResult);
                            else:
                                # AH/CH/DH/BH operands need special handling.
                                self.generateOneStdTestGregGreg8BitHighPain(oGen, cbEffOp, cbMaxOp, iOp1, iOp2, uInput);

        # Memory test.
        if self.fTestMemForm:
            for cAddrBits in oGen.oTarget.getAddrModes():
                for cbEffOp in self.acbOpVars:
                    if cbEffOp > cbMaxOp:
                        continue;

                    for _ in oGen.getModRegRange(cbEffOp):
                        # The Mod R/M selection state lives in oGen so coverage varies
                        # from instruction to instruction instead of repeating.
                        oGen.iModReg = (oGen.iModReg + 1) % oGen.oTarget.getGRegCount(cbEffOp);
                        if oGen.iModReg == X86_GREG_xSP:
                            continue; # Cannot test xSP atm.
                        if oGen.iModReg > 15:
                            continue; ## TODO AH,CH,DH,BH

                        auInputs = auLongInputs if oGen.iModReg == iLongOp1 else auShortInputs;
                        for _ in oGen.oModRmRange:
                            oGen.iModRm = (oGen.iModRm + 1) % oGen.oTarget.getGRegCount(cAddrBits * 8);
                            if oGen.iModRm != 4 or cAddrBits == 16:
                                for uInput in auInputs:
                                    oGen.newSubTest();
                                    # NOTE(review): this skip is evaluated after newSubTest() has
                                    # already emitted the indicator update - confirm intentional.
                                    if oGen.iModReg == oGen.iModRm and oGen.iModRm != 5 \
                                      and oGen.iModRm != 13 and cbEffOp != cbMaxOp:
                                        continue; # Don't know the high bit of the address ending up the result - skip it for now.
                                    uResult = self.fnCalcResult(cbEffOp, uInput, oGen.auRegValues[oGen.iModReg & 15], oGen);
                                    self.generateOneStdTestGregMemNoSib(oGen, cAddrBits, cbEffOp, cbMaxOp,
                                                                        oGen.iModReg, oGen.iModRm, uInput, uResult);
                            else:
                                # SIB - currently only short list of inputs or things may get seriously out of hand.
                                self.generateStdTestGregMemSib(oGen, cAddrBits, cbEffOp, cbMaxOp, oGen.iModReg, auShortInputs);
        return True;
+
+ def generateTest(self, oGen, sTestFnName):
+ oGen.write('VBINSTST_BEGINPROC %s\n' % (sTestFnName,));
+
+ self.generateStandardTests(oGen);
+
+ oGen.write(' ret\n');
+ oGen.write('VBINSTST_ENDPROC %s\n' % (sTestFnName,));
+ return True;
+
+
+
class InstrTest_Mov_Gv_Ev(InstrTest_MemOrGreg_2_Greg):
    """
    Tests MOV Gv,Ev.
    """
    def __init__(self):
        InstrTest_MemOrGreg_2_Greg.__init__(self, 'mov Gv,Ev', self.calc_mov);

    @staticmethod
    def calc_mov(cbEffOp, uInput, uCur, oGen):
        """ Calculates the result of a mov instruction. """
        _ = oGen;
        if cbEffOp in (8, 4):
            # Qword writes replace the register; dword writes zero-extend into it.
            return uInput & (UINT64_MAX if cbEffOp == 8 else UINT32_MAX);
        # Word and byte writes only replace the low bits of the destination.
        assert cbEffOp in (2, 1);
        fNew = (1 << (cbEffOp * 8)) - 1;
        return (uCur & (UINT64_MAX - fNew)) | (uInput & fNew);
+
+
class InstrTest_MovSxD_Gv_Ev(InstrTest_MemOrGreg_2_Greg):
    """
    Tests MOVSXD Gv,Ev.
    """
    def __init__(self):
        InstrTest_MemOrGreg_2_Greg.__init__(self, 'movsxd Gv,Ev', self.calc_movsxd, acbOpVars = [ 8, 4, 2, ]);
        self.fTestMemForm = False; # drop this...

    def writeInstrGregGreg(self, cbEffOp, iOp1, iOp2, oGen):
        """ Writes the instruction with two general registers as operands. """
        if cbEffOp == 8:
            # The source operand is half the destination width, hence '// 2'.
            # Note! Floor division so the byte count stays an integer under python 3 too.
            oGen.write(' movsxd %s, %s\n'
                       % ( oGen.gregNameBytes(iOp1, cbEffOp), oGen.gregNameBytes(iOp2, cbEffOp // 2),));
        else:
            oGen.write(' oddmovsxd %s, %s\n'
                       % ( oGen.gregNameBytes(iOp1, cbEffOp), oGen.gregNameBytes(iOp2, cbEffOp),));
        return True;

    def isApplicable(self, oGen):
        """ MOVSXD only exists on 64-bit targets. """
        return oGen.oTarget.is64Bit();

    @staticmethod
    def calc_movsxd(cbEffOp, uInput, uCur, oGen):
        """
        Calculates the result of a movsxd instruction.
        Returns the result value (cbMaxOp sized).
        """
        _ = oGen;
        if cbEffOp == 8 and (uInput & RT_BIT_32(31)):
            # Negative dword input: sign-extend into the high half.
            return (UINT32_MAX << 32) | (uInput & UINT32_MAX);
        if cbEffOp == 2:
            return (uCur & 0xffffffffffff0000) | (uInput & 0xffff);
        return uInput & UINT32_MAX;
+
+
class InstrTest_DivIDiv(InstrTestBase):
    """
    Tests the DIV and IDIV instructions.

    Note! All divisions below use floor division ('//') so the generator
          behaves identically on python 2 (where '/' on ints already floors)
          and python 3 (where '/' would produce floats).  The signed cases
          explicitly adjust the floored quotient to the truncating rounding
          that x86 IDIV uses.
    """

    def __init__(self, fIsIDiv):
        if not fIsIDiv:
            InstrTestBase.__init__(self, 'div Gv,Ev', 'div');
        else:
            InstrTestBase.__init__(self, 'idiv Gv,Ev', 'idiv');
        self.fIsIDiv = fIsIDiv; # True for IDIV (signed), False for DIV (unsigned).

    def generateInputEdgeCases(self, cbEffOp, fLong, fXcpt):
        """
        Generate edge case inputs for cbEffOp.  Returns a list of pairs,
        dividend + divisor.  When fXcpt is set the values are nudged so the
        quotient just overflows the result register (raising #DE).
        """
        # Test params.
        uStep = 1 << (cbEffOp * 8);
        if self.fIsIDiv:
            uStep //= 2;        # Signed quotients have one less magnitude bit.

        # edge tests
        auRet = [];

        uDivisor  = 1 if fLong else 3;
        uDividend = uStep * uDivisor - 1;   # Largest dividend with a representable quotient.
        for i in range(5 if fLong else 3):
            auRet.append([uDividend + fXcpt, uDivisor]);
            if self.fIsIDiv:
                auRet.append([-uDividend - fXcpt, -uDivisor]);
                auRet.append([-(uDividend + uDivisor + fXcpt), uDivisor]);
                auRet.append([ (uDividend + uDivisor + fXcpt), -uDivisor]);
            if i <= 3 and fLong:
                auRet.append([uDividend - 1 + fXcpt*3, uDivisor]);
                if self.fIsIDiv:
                    auRet.append([-(uDividend - 1 + fXcpt*3), -uDivisor]);
            uDivisor  += 1;
            uDividend += uStep;

        uDivisor  = uStep - 1;
        uDividend = uStep * uDivisor - 1;
        for _ in range(3 if fLong else 1):
            auRet.append([uDividend + fXcpt, uDivisor]);
            if self.fIsIDiv:
                auRet.append([-uDividend - fXcpt, -uDivisor]);
            uDivisor  -= 1;
            uDividend -= uStep;

        if self.fIsIDiv:
            # Negative divisor edge cases.
            uDivisor = -uStep;
            for _ in range(3 if fLong else 1):
                auRet.append([uDivisor * (-uStep - 1) - (not fXcpt), uDivisor]);
                uDivisor += 1;
            uDivisor = uStep - 1;
            for _ in range(3 if fLong else 1):
                auRet.append([-(uDivisor * (uStep + 1) - (not fXcpt)), uDivisor]);
                uDivisor -= 1;

        return auRet;

    def generateInputsNoXcpt(self, cbEffOp, fLong = False):
        """ Generate inputs for cbEffOp.  Returns a list of pairs, dividend + divisor. """
        # Test params.
        uStep = 1 << (cbEffOp * 8);
        if self.fIsIDiv:
            uStep //= 2;

        # edge tests
        auRet = self.generateInputEdgeCases(cbEffOp, fLong, False);

        # random tests.
        if self.fIsIDiv:
            for _ in range(6 if fLong else 2):
                while True:
                    uDivisor = randSxx(cbEffOp * 8);
                    if uDivisor == 0 or uDivisor >= uStep or uDivisor < -uStep:
                        continue;
                    uDividend = randSxx(cbEffOp * 16);
                    uResult = uDividend // uDivisor;
                    if uResult >= uStep or uResult <= -uStep: # exclude difficulties
                        continue;
                    break;
                auRet.append([uDividend, uDivisor]);
        else:
            for _ in range(6 if fLong else 2):
                while True:
                    uDivisor = randUxx(cbEffOp * 8);
                    if uDivisor == 0 or uDivisor >= uStep:
                        continue;
                    uDividend = randUxx(cbEffOp * 16);
                    uResult = uDividend // uDivisor;
                    if uResult >= uStep:
                        continue;
                    break;
                auRet.append([uDividend, uDivisor]);

        return auRet;

    def generateOneStdTestGreg(self, oGen, cbEffOp, iOp2, iDividend, iDivisor):
        """ Generate code of one '[I]DIV rDX:rAX,<GREG>' test. """
        cbMaxOp   = oGen.oTarget.getMaxOpBytes();
        fEffOp    = ((1 << (cbEffOp * 8)) - 1);
        fMaxOp    = UINT64_MAX if cbMaxOp == 8 else UINT32_MAX; assert cbMaxOp in [8, 4];
        fTopOp    = fMaxOp - fEffOp;
        fFullOp1  = ((1 << (cbEffOp * 16)) - 1);

        uAX       = iDividend & fFullOp1;   # The full double-width dividend as unsigned.
        uDX       = uAX >> (cbEffOp * 8);
        uAX      &= fEffOp;
        uOp2Val   = iDivisor & fEffOp;

        iQuotient = iDividend // iDivisor;
        iReminder = iDividend %  iDivisor;
        if iReminder != 0 and iQuotient < 0: # python floors the quotient, x86 truncates towards zero.
            iQuotient += 1;
            iReminder -= iDivisor;
        uAXResult = iQuotient & fEffOp;
        uDXResult = iReminder & fEffOp;

        if cbEffOp < cbMaxOp:
            # Pollute the unused top bits; for sub-dword ops they survive into the results.
            uAX     |= randUxx(cbMaxOp * 8) & fTopOp;
            uDX     |= randUxx(cbMaxOp * 8) & fTopOp;
            uOp2Val |= randUxx(cbMaxOp * 8) & fTopOp;
            if cbEffOp < 4:
                uAXResult |= uAX & fTopOp;
                uDXResult |= uDX & fTopOp;
        oGen.write(' ; iDividend=%#x (%d) iDivisor=%#x (%d)\n'
                   ' ; iQuotient=%#x (%d) iReminder=%#x (%d)\n'
                   % ( iDividend & fFullOp1, iDividend, iDivisor & fEffOp, iDivisor,
                       iQuotient & fEffOp, iQuotient, iReminder & fEffOp, iReminder, ));

        oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
        oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xDX], uDX,));
        oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xAX], uAX,));
        oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[iOp2], uOp2Val,));

        oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iOp2],));
        oGen.pushConst(uDXResult);
        oGen.pushConst(uAXResult);

        oGen.write(' %-4s %s\n' % (self.sInstr, oGen.gregNameBytes(iOp2, cbEffOp),));
        oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(X86_GREG_xAX, X86_GREG_xDX, iOp2),));
        return True;

    def generateOneStdTestGreg8Bit(self, oGen, cbEffOp, iOp2, iDividend, iDivisor):
        """ Generate code of one '[I]DIV AX,<GREG>' test (8-bit). """
        cbMaxOp = oGen.oTarget.getMaxOpBytes();
        fMaxOp  = UINT64_MAX if cbMaxOp == 8 else UINT32_MAX; assert cbMaxOp in [8, 4];
        iOp2X   = (iOp2 & 3) if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2) else iOp2;
        assert iOp2X != X86_GREG_xAX;

        uAX     = iDividend & UINT16_MAX;   # The full 16-bit dividend as unsigned.
        uOp2Val = iDivisor  & UINT8_MAX;

        iQuotient = iDividend // iDivisor;
        iReminder = iDividend %  iDivisor;
        if iReminder != 0 and iQuotient < 0: # python floors the quotient, x86 truncates towards zero.
            iQuotient += 1;
            iReminder -= iDivisor;
        # The 8-bit form returns the quotient in AL and the remainder in AH.
        uAXResult = (iQuotient & UINT8_MAX) | ((iReminder & UINT8_MAX) << 8);

        uAX       |= randUxx(cbMaxOp * 8) & (fMaxOp - UINT16_MAX);
        uAXResult |= uAX & (fMaxOp - UINT16_MAX);
        uOp2Val   |= randUxx(cbMaxOp * 8) & (fMaxOp - UINT8_MAX);
        if iOp2X != iOp2:
            # High-byte register: rotate the value so the divisor lands in bits 8..15.
            uOp2Val = rotateLeftUxx(cbMaxOp * 8, uOp2Val, 8);
        oGen.write(' ; iDividend=%#x (%d) iDivisor=%#x (%d)\n'
                   ' ; iQuotient=%#x (%d) iReminder=%#x (%d)\n'
                   % ( iDividend & UINT16_MAX, iDividend, iDivisor & UINT8_MAX, iDivisor,
                       iQuotient & UINT8_MAX, iQuotient, iReminder & UINT8_MAX, iReminder, ));

        oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
        oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xAX], uAX,));
        oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[iOp2X], uOp2Val,));
        oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iOp2X],));
        oGen.pushConst(uAXResult);

        oGen.write(' %-4s %s\n' % (self.sInstr, oGen.gregNameBytes(iOp2, cbEffOp),));
        oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(X86_GREG_xAX, iOp2X),));
        return;


    def generateStandardTests(self, oGen):
        """ Generates test that causes no exceptions. """

        # Parameters.
        iLongOp2 = oGen.oTarget.randGRegNoSp();

        # Register tests
        if True:
            for cbEffOp in ( 8, 4, 2, 1 ):
                if cbEffOp > oGen.oTarget.getMaxOpBytes():
                    continue;
                oGen.write('; cbEffOp=%u\n' % (cbEffOp,));
                oOp2Range = range(oGen.oTarget.getGRegCount(cbEffOp));
                if oGen.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny:
                    oOp2Range = [iLongOp2,];
                for iOp2 in oOp2Range:
                    if iOp2 == X86_GREG_xSP:
                        continue; # Cannot test xSP atm.
                    if iOp2 == X86_GREG_xAX or (cbEffOp > 1 and iOp2 == X86_GREG_xDX):
                        continue; # Will overflow or be too complicated to get right.
                    if cbEffOp == 1 and iOp2 == (16 if oGen.oTarget.is64Bit() else 4):
                        continue; # Avoid dividing by AH, same reasons as above.

                    for iDividend, iDivisor in self.generateInputsNoXcpt(cbEffOp, iOp2 == iLongOp2):
                        oGen.newSubTest();
                        if cbEffOp > 1:
                            self.generateOneStdTestGreg(oGen, cbEffOp, iOp2, iDividend, iDivisor);
                        else:
                            self.generateOneStdTestGreg8Bit(oGen, cbEffOp, iOp2, iDividend, iDivisor);

        ## Memory test.
        #if False:
        #    for cAddrBits in oGen.oTarget.getAddrModes():
        #        for cbEffOp in self.acbOpVars:
        #            if cbEffOp > cbMaxOp:
        #                continue;
        #
        #            auInputs = auLongInputs if oGen.iModReg == iLongOp1 else auShortInputs;
        #            for _ in oGen.oModRmRange:
        #                oGen.iModRm = (oGen.iModRm + 1) % oGen.oTarget.getGRegCount(cAddrBits * 8);
        #                if oGen.iModRm != 4 or cAddrBits == 16:
        #                    for uInput in auInputs:
        #                        oGen.newSubTest();
        #                        if oGen.iModReg == oGen.iModRm and oGen.iModRm != 5 and oGen.iModRm != 13 and cbEffOp != cbMaxOp:
        #                            continue; # Don't know the high bit of the address ending up the result - skip it for now.
        #                        uResult = self.fnCalcResult(cbEffOp, uInput, oGen.auRegValues[oGen.iModReg & 15], oGen);
        #                        self.generateOneStdTestGregMemNoSib(oGen, cAddrBits, cbEffOp, cbMaxOp,
        #                                                            oGen.iModReg, oGen.iModRm, uInput, uResult);
        #                else:
        #                    # SIB - currently only short list of inputs or things may get seriously out of hand.
        #                    self.generateStdTestGregMemSib(oGen, cAddrBits, cbEffOp, cbMaxOp, oGen.iModReg, auShortInputs);
        #
        return True;

    def generateInputsXcpt(self, cbEffOp, fLong = False):
        """
        Generate inputs for cbEffOp that will overflow or underflow.
        Returns a list of pairs, dividend + divisor.
        """
        # Test params.
        uStep = 1 << (cbEffOp * 8);
        if self.fIsIDiv:
            uStep //= 2;

        # edge tests
        auRet = self.generateInputEdgeCases(cbEffOp, fLong, True);
        auRet.extend([[0, 0], [1, 0], [ uStep * uStep // 2 - 1, 0]]); # divide-by-zero cases.

        # random tests.
        if self.fIsIDiv:
            for _ in range(6 if fLong else 2):
                while True:
                    uDivisor  = randSxx(cbEffOp * 8);
                    uDividend = randSxx(cbEffOp * 16);
                    if uDivisor >= uStep or uDivisor < -uStep:
                        continue;
                    if uDivisor != 0:
                        uResult = uDividend // uDivisor;
                        if (uResult <= uStep and uResult >= 0) or (uResult >= -uStep and uResult < 0):
                            continue; # exclude difficulties
                    break;
                auRet.append([uDividend, uDivisor]);
        else:
            for _ in range(6 if fLong else 2):
                while True:
                    uDivisor  = randUxx(cbEffOp * 8);
                    uDividend = randUxx(cbEffOp * 16);
                    if uDivisor >= uStep:
                        continue;
                    if uDivisor != 0:
                        uResult = uDividend // uDivisor;
                        if uResult < uStep:
                            continue;
                    break;
                auRet.append([uDividend, uDivisor]);

        return auRet;

    def generateOneDivideErrorTestGreg(self, oGen, cbEffOp, iOp2, iDividend, iDivisor):
        """ Generate code of one '[I]DIV rDX:rAX,<GREG>' test that causes #DE. """
        cbMaxOp   = oGen.oTarget.getMaxOpBytes();
        fEffOp    = ((1 << (cbEffOp * 8)) - 1);
        fMaxOp    = UINT64_MAX if cbMaxOp == 8 else UINT32_MAX; assert cbMaxOp in [8, 4];
        fTopOp    = fMaxOp - fEffOp;
        fFullOp1  = ((1 << (cbEffOp * 16)) - 1);

        uAX       = iDividend & fFullOp1;   # The full double-width dividend as unsigned.
        uDX       = uAX >> (cbEffOp * 8);
        uAX      &= fEffOp;
        uOp2Val   = iDivisor & fEffOp;

        if cbEffOp < cbMaxOp:
            uAX     |= randUxx(cbMaxOp * 8) & fTopOp;
            uDX     |= randUxx(cbMaxOp * 8) & fTopOp;
            uOp2Val |= randUxx(cbMaxOp * 8) & fTopOp;
        oGen.write(' ; iDividend=%#x (%d) iDivisor=%#x (%d)\n'
                   % ( iDividend & fFullOp1, iDividend, iDivisor & fEffOp, iDivisor,));
        oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
        oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xDX], uDX,));
        oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xAX], uAX,));
        oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[iOp2], uOp2Val,));
        # The trapping instruction must leave the registers unchanged, so the
        # expected values are simply the inputs.
        oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iOp2],));
        oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[X86_GREG_xDX],));
        oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[X86_GREG_xAX],));
        oGen.write(' VBINSTST_TRAP_INSTR X86_XCPT_DE, 0, %-4s %s\n'
                   % (self.sInstr, oGen.gregNameBytes(iOp2, cbEffOp),));
        oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(X86_GREG_xAX, X86_GREG_xDX, iOp2),));
        return True;

    def generateOneDivideErrorTestGreg8Bit(self, oGen, cbEffOp, iOp2, iDividend, iDivisor):
        """ Generate code of one '[I]DIV AX,<GREG>' test that causes #DE (8-bit). """
        if not oGen.oTarget.is64Bit() and iOp2 == 4: # Avoid AH.
            iOp2 = 5;

        cbMaxOp = oGen.oTarget.getMaxOpBytes();
        fMaxOp  = UINT64_MAX if cbMaxOp == 8 else UINT32_MAX; assert cbMaxOp in [8, 4];
        iOp2X   = (iOp2 & 3) if oGen.oTarget.is8BitHighGReg(cbEffOp, iOp2) else iOp2;
        assert iOp2X != X86_GREG_xAX;

        uAX     = iDividend & UINT16_MAX;   # The full 16-bit dividend as unsigned.
        uOp2Val = iDivisor  & UINT8_MAX;

        uAX     |= randUxx(cbMaxOp * 8) & (fMaxOp - UINT16_MAX);
        uOp2Val |= randUxx(cbMaxOp * 8) & (fMaxOp - UINT8_MAX);
        if iOp2X != iOp2:
            uOp2Val = rotateLeftUxx(cbMaxOp * 8, uOp2Val, 8);
        oGen.write(' ; iDividend=%#x (%d) iDivisor=%#x (%d)\n'
                   % ( iDividend & UINT16_MAX, iDividend, iDivisor & UINT8_MAX, iDivisor,));
        oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
        oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[X86_GREG_xAX], uAX,));
        oGen.write(' mov %s, 0x%x\n' % (oGen.oTarget.asGRegs[iOp2X], uOp2Val,));
        oGen.write(' push %s\n' % (oGen.oTarget.asGRegs[iOp2X],));
        oGen.write(' push sAX\n');
        oGen.write(' VBINSTST_TRAP_INSTR X86_XCPT_DE, 0, %-4s %s\n'
                   % (self.sInstr, oGen.gregNameBytes(iOp2, cbEffOp),));
        oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needGRegChecker(X86_GREG_xAX, iOp2X),));
        return;

    def generateDivideErrorTests(self, oGen):
        """ Generate divide error tests (raises X86_XCPT_DE). """
        oGen.write('%ifdef VBINSTST_CAN_DO_TRAPS\n');

        # We do one register variation here, assuming the standard test has got them covered.
        # Register tests
        if True:
            iOp2 = oGen.oTarget.randGRegNoSp();
            while iOp2 == X86_GREG_xAX or iOp2 == X86_GREG_xDX:
                iOp2 = oGen.oTarget.randGRegNoSp();

            for cbEffOp in ( 8, 4, 2, 1 ):
                if cbEffOp > oGen.oTarget.getMaxOpBytes():
                    continue;
                oGen.write('; cbEffOp=%u iOp2=%u\n' % (cbEffOp, iOp2,));

                for iDividend, iDivisor in self.generateInputsXcpt(cbEffOp, fLong = not oGen.isTiny()):
                    oGen.newSubTest();
                    if cbEffOp > 1:
                        self.generateOneDivideErrorTestGreg(oGen, cbEffOp, iOp2, iDividend, iDivisor);
                    else:
                        self.generateOneDivideErrorTestGreg8Bit(oGen, cbEffOp, iOp2, iDividend, iDivisor);

        oGen.write('%endif ; VBINSTST_CAN_DO_TRAPS\n');
        return True;


    def generateTest(self, oGen, sTestFnName):
        """ Emits the whole test procedure: normal divisions followed by #DE cases. """
        oGen.write('VBINSTST_BEGINPROC %s\n' % (sTestFnName,));
        #oGen.write(' int3\n');

        self.generateStandardTests(oGen);
        self.generateDivideErrorTests(oGen);

        #oGen.write(' int3\n');
        oGen.write(' ret\n');
        oGen.write('VBINSTST_ENDPROC %s\n' % (sTestFnName,));
        return True;
+
+
+
class InstrTest_DaaDas(InstrTestBase):
    """ Tests the DAA and DAS instructions. """

    def __init__(self, fIsDas):
        InstrTestBase.__init__(self, 'das' if fIsDas else 'daa');
        self.fIsDas = fIsDas;   # True for DAS, False for DAA.

    def isApplicable(self, oGen):
        # DAA/DAS only exist outside 64-bit mode.
        return not oGen.oTarget.is64Bit();

    def generateTest(self, oGen, sTestFnName):
        """ Generates a loop driving DAA/DAS through a precomputed result table. """
        # The expected AL and EFLAGS values come from the generated itgTable* modules.
        if self.fIsDas: from itgTableDas import g_aItgDasResults as aItgResults;
        else: from itgTableDaa import g_aItgDaaResults as aItgResults;
        cMax = len(aItgResults);
        if oGen.isTiny():
            cMax = 64;

        oGen.write('VBINSTST_BEGINPROC %s\n' % (sTestFnName,));
        oGen.write(' xor ebx, ebx\n');
        oGen.write('.das_loop:\n');
        # Save the loop variable so we can load known values.
        oGen.write(' push ebx\n');
        oGen.newSubTestEx('ebx');

        # Push the results.
        oGen.write(' movzx eax, byte [.abAlResults + ebx]\n');
        oGen.write(' or eax, %#x\n' % (oGen.au32Regs[X86_GREG_xAX] & ~0xff,));
        oGen.write(' push eax\n');
        oGen.write(' movzx eax, byte [.aFlagsResults + ebx]\n');
        oGen.write(' push eax\n');
        # Calc and push the inputs.
        # The AL input comes from bits 2+ of the loop counter.
        oGen.write(' mov eax, ebx\n');
        oGen.write(' shr eax, 2\n');
        oGen.write(' and eax, 0ffh\n');
        oGen.write(' or eax, %#x\n' % (oGen.au32Regs[X86_GREG_xAX] & ~0xff,));
        oGen.write(' push eax\n');

        # The input CF and AF come from bits 0 and 1 of the loop counter.
        oGen.write(' pushfd\n')
        oGen.write(' and dword [xSP], ~(X86_EFL_CF | X86_EFL_AF)\n');
        oGen.write(' mov al, bl\n');
        oGen.write(' and al, 2\n');
        oGen.write(' shl al, X86_EFL_AF_BIT - 1\n');
        oGen.write(' or [xSP], al\n');
        oGen.write(' mov al, bl\n');
        oGen.write(' and al, X86_EFL_CF\n');
        oGen.write(' or [xSP], al\n');

        # Load register values and do the test.
        oGen.write(' call VBINSTST_NAME(Common_LoadKnownValues)\n');
        oGen.write(' popfd\n');
        oGen.write(' pop eax\n');
        if self.fIsDas:
            oGen.write(' das\n');
        else:
            oGen.write(' daa\n');

        # Verify the results.
        fFlagsToCheck = X86_EFL_CF | X86_EFL_PF | X86_EFL_AF | X86_EFL_SF | X86_EFL_ZF;
        oGen.write(' call VBINSTST_NAME(%s)\n' % (oGen.needFlagsGRegChecker(fFlagsToCheck, X86_GREG_xAX),));

        # Restore the loop variable and advance.
        oGen.write(' pop ebx\n');
        oGen.write(' inc ebx\n');
        oGen.write(' cmp ebx, %#x\n' % (cMax,));
        oGen.write(' jb .das_loop\n');

        oGen.write(' ret\n');

        # Emit the result tables (AL values and EFLAGS values) after the code.
        oGen.write('.abAlResults:\n');
        for i in range(cMax):
            oGen.write(' db %#x\n' % (aItgResults[i][0],));

        oGen.write('.aFlagsResults:\n');
        for i in range(cMax):
            oGen.write(' db %#x\n' % (aItgResults[i][1],));

        oGen.write('VBINSTST_ENDPROC %s\n' % (sTestFnName,));
        return True;
+
+
+##
+# Instruction Tests.
+#
# One entry per generated test function; each test may define isApplicable()
# to restrict itself to suitable target environments.
g_aoInstructionTests = [
    InstrTest_Mov_Gv_Ev(),
    InstrTest_MovSxD_Gv_Ev(),
    InstrTest_DivIDiv(fIsIDiv = False),
    InstrTest_DivIDiv(fIsIDiv = True),
    InstrTest_DaaDas(fIsDas = False),
    InstrTest_DaaDas(fIsDas = True),
];
+
+
+
+
+
+class InstructionTestGen(object): # pylint: disable=R0902
+ """
+ Instruction Test Generator.
+ """
+
+ ## @name Test size
+ ## @{
+ ksTestSize_Large = 'large';
+ ksTestSize_Medium = 'medium';
+ ksTestSize_Tiny = 'tiny';
+ ## @}
+ kasTestSizes = ( ksTestSize_Large, ksTestSize_Medium, ksTestSize_Tiny );
+
+ ## The prefix for the checker functions.
+ ksCheckerPrefix = 'Common_Check_'
+
+
+ def __init__(self, oOptions):
+ self.oOptions = oOptions;
+ self.oTarget = g_dTargetEnvs[oOptions.sTargetEnv];
+
+ # Calculate the number of output files.
+ self.cFiles = 1;
+ if len(g_aoInstructionTests) > self.oOptions.cInstrPerFile:
+ self.cFiles = len(g_aoInstructionTests) / self.oOptions.cInstrPerFile;
+ if self.cFiles * self.oOptions.cInstrPerFile < len(g_aoInstructionTests):
+ self.cFiles += 1;
+
+ # Fix the known register values.
+ self.au64Regs = randUxxList(64, 16);
+ self.au32Regs = [(self.au64Regs[i] & UINT32_MAX) for i in range(8)];
+ self.au16Regs = [(self.au64Regs[i] & UINT16_MAX) for i in range(8)];
+ self.auRegValues = self.au64Regs if self.oTarget.is64Bit() else self.au32Regs;
+
+ # Declare state variables used while generating.
+ self.oFile = sys.stderr;
+ self.iFile = -1;
+ self.sFile = '';
+ self._dCheckFns = dict();
+ self._dMemSetupFns = dict();
+ self._d64BitConsts = dict();
+
+ # State variables used while generating test convenientely placed here (lazy bird)...
+ self.iModReg = 0;
+ self.iModRm = 0;
+ self.iSibBaseReg = 0;
+ self.iSibIndexReg = 0;
+ self.iSibScale = 1;
+ if self.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny:
+ self._oModRegRange = range(2);
+ self._oModRegRange8 = range(2);
+ self.oModRmRange = range(2);
+ self.cSibBasePerRun = 1;
+ self._cSibIndexPerRun = 2;
+ self.oSibScaleRange = range(1);
+ elif self.oOptions.sTestSize == InstructionTestGen.ksTestSize_Medium:
+ self._oModRegRange = range( 5 if self.oTarget.is64Bit() else 4);
+ self._oModRegRange8 = range( 6 if self.oTarget.is64Bit() else 4);
+ self.oModRmRange = range(5);
+ self.cSibBasePerRun = 5;
+ self._cSibIndexPerRun = 4
+ self.oSibScaleRange = range(2);
+ else:
+ self._oModRegRange = range(16 if self.oTarget.is64Bit() else 8);
+ self._oModRegRange8 = range(20 if self.oTarget.is64Bit() else 8);
+ self.oModRmRange = range(16 if self.oTarget.is64Bit() else 8);
+ self.cSibBasePerRun = 8;
+ self._cSibIndexPerRun = 9;
+ self.oSibScaleRange = range(4);
+ self.iSibIndexRange = 0;
+
+
+ #
+ # Methods used by instruction tests.
+ #
+
    def write(self, sText):
        """ Writes to the current output file. """
        # Note! 'unicode' is the python 2 text type; this script targets
        #       python 2 (see also the 'long' usage elsewhere in the file).
        return self.oFile.write(unicode(sText));
+
    def writeln(self, sText):
        """ Writes a line to the current output file. """
        self.write(sText);
        return self.write('\n');    # Returns the second write's result, like write().
+
+ def writeInstrBytes(self, abInstr):
+ """
+ Emits an instruction given as a sequence of bytes values.
+ """
+ self.write(' db %#04x' % (abInstr[0],));
+ for i in range(1, len(abInstr)):
+ self.write(', %#04x' % (abInstr[i],));
+ return self.write('\n');
+
    def newSubTest(self):
        """
        Indicates that a new subtest has started.
        """
        # The indicator is set to the assembly source line number (__LINE__ is
        # expanded by the assembler), making subtest failures traceable.
        self.write(' mov dword [VBINSTST_NAME(g_uVBInsTstSubTestIndicator) xWrtRIP], __LINE__\n');
        return True;
+
    def newSubTestEx(self, sIndicator):
        """
        Indicates that a new subtest has started, using the caller supplied
        expression/register string (sIndicator) as the indicator value.
        """
        self.write(' mov dword [VBINSTST_NAME(g_uVBInsTstSubTestIndicator) xWrtRIP], %s\n' % (sIndicator, ));
        return True;
+
+ def needGRegChecker(self, iReg1, iReg2 = None, iReg3 = None):
+ """
+ Records the need for a given register checker function, returning its label.
+ """
+ if iReg2 is not None:
+ if iReg3 is not None:
+ sName = '%s_%s_%s' % (self.oTarget.asGRegs[iReg1], self.oTarget.asGRegs[iReg2], self.oTarget.asGRegs[iReg3],);
+ else:
+ sName = '%s_%s' % (self.oTarget.asGRegs[iReg1], self.oTarget.asGRegs[iReg2],);
+ else:
+ sName = '%s' % (self.oTarget.asGRegs[iReg1],);
+ assert iReg3 is None;
+
+ if sName in self._dCheckFns:
+ self._dCheckFns[sName] += 1;
+ else:
+ self._dCheckFns[sName] = 1;
+
+ return self.ksCheckerPrefix + sName;
+
+ def needFlagsGRegChecker(self, fFlagsToCheck, iReg1, iReg2 = None, iReg3 = None):
+ """
+ Records the need for a given rFLAGS + register checker function, returning its label.
+ """
+ sWorkerName = self.needGRegChecker(iReg1, iReg2, iReg3);
+
+ sName = 'eflags_%#x_%s' % (fFlagsToCheck, sWorkerName[len(self.ksCheckerPrefix):]);
+ if sName in self._dCheckFns:
+ self._dCheckFns[sName] += 1;
+ else:
+ self._dCheckFns[sName] = 1;
+
+ return self.ksCheckerPrefix + sName;
+
+ def needGRegMemSetup(self, cAddrBits, cbEffOp, iBaseReg = None, offDisp = None, iIndexReg = None, iScale = 1):
+ """
+ Records the need for a given register checker function, returning its label.
+ """
+ assert cAddrBits in [64, 32, 16];
+ assert cbEffOp in [8, 4, 2, 1];
+ assert iScale in [1, 2, 4, 8];
+
+ sName = '%ubit_U%u' % (cAddrBits, cbEffOp * 8,);
+ if iBaseReg is not None:
+ sName += '_%s' % (gregName(iBaseReg, cAddrBits),);
+ sName += '_x%u' % (iScale,);
+ if iIndexReg is not None:
+ sName += '_%s' % (gregName(iIndexReg, cAddrBits),);
+ if offDisp is not None:
+ sName += '_%#010x' % (offDisp & UINT32_MAX, );
+ if sName in self._dMemSetupFns:
+ self._dMemSetupFns[sName] += 1;
+ else:
+ self._dMemSetupFns[sName] = 1;
+ return 'Common_MemSetup_' + sName;
+
+ def need64BitConstant(self, uVal):
+ """
+ Records the need for a 64-bit constant, returning its label.
+ These constants are pooled to attempt reduce the size of the whole thing.
+ """
+ assert uVal >= 0 and uVal <= UINT64_MAX;
+ if uVal in self._d64BitConsts:
+ self._d64BitConsts[uVal] += 1;
+ else:
+ self._d64BitConsts[uVal] = 1;
+ return 'g_u64Const_0x%016x' % (uVal, );
+
+ def pushConst(self, uResult):
+ """
+ Emits a push constant value, taking care of high values on 64-bit hosts.
+ """
+ if self.oTarget.is64Bit() and uResult >= 0x80000000:
+ self.write(' push qword [%s wrt rip]\n' % (self.need64BitConstant(uResult),));
+ else:
+ self.write(' push dword 0x%x\n' % (uResult,));
+ return True;
+
+ def getDispForMod(self, iMod, cbAlignment = 1):
+ """
+ Get a set of address dispositions for a given addressing mode.
+ The alignment restriction is for SIB scaling.
+ """
+ assert cbAlignment in [1, 2, 4, 8];
+ if iMod == 0:
+ aoffDisp = [ None, ];
+ elif iMod == 1:
+ aoffDisp = [ 127 & ~(cbAlignment - 1), -128 ];
+ elif iMod == 2:
+ aoffDisp = [ 2147483647 & ~(cbAlignment - 1), -2147483648 ];
+ else: assert False;
+ return aoffDisp;
+
+ def getModRegRange(self, cbEffOp):
+ """
+ The Mod R/M register range varies with the effective operand size, for
+ 8-bit registers we have 4 more.
+ """
+ if cbEffOp == 1:
+ return self._oModRegRange8;
+ return self._oModRegRange;
+
+ def getSibIndexPerRun(self):
+ """
+ We vary the SIB index test range a little to try cover more operand
+ combinations and avoid repeating the same ones.
+ """
+ self.iSibIndexRange += 1;
+ self.iSibIndexRange %= 3;
+ if self.iSibIndexRange == 0:
+ return self._cSibIndexPerRun - 1;
+ return self._cSibIndexPerRun;
+
    def isTiny(self):
        """ Checks if we're generating the 'tiny' test size. """
        return self.oOptions.sTestSize == InstructionTestGen.ksTestSize_Tiny;
+
    def isMedium(self):
        """ Checks if we're generating the 'medium' test size. """
        return self.oOptions.sTestSize == InstructionTestGen.ksTestSize_Medium;
+
+
+ #
+ # Forwarding calls for oTarget to shorted typing and lessen the attacks
+ # on the right margin.
+ #
+
    def gregNameBits(self, iReg, cBitsWide):
        """ Target: Get the name of a general register for the given size (in bits). """
        return self.oTarget.gregNameBits(iReg, cBitsWide);
+
    def gregNameBytes(self, iReg, cbWide):
        """ Target: Get the name of a general register for the given size (in bytes). """
        return self.oTarget.gregNameBytes(iReg, cbWide);
+
    def is64Bit(self):
        """ Target: Is the target 64-bit? """
        return self.oTarget.is64Bit();
+
+
+ #
+ # Internal machinery.
+ #
+
    def _randInitIndexes(self):
        """
        Initializes the Mod R/M and SIB state index with random numbers prior
        to generating a test.

        Note! As with all other randomness and variations we do, we cannot
              test all combinations for each and every instruction so we try
              get coverage over time.
        """
        # The raw randU8 values may exceed the valid register ranges; users
        # presumably reduce them modulo the register count when stepping
        # (see generateStandardTests) - TODO confirm for all users.
        self.iModReg        = randU8();
        self.iModRm         = randU8();
        self.iSibBaseReg    = randU8();
        self.iSibIndexReg   = randU8();
        self.iSibScale      = 1 << (randU8() & 3);  # Always 1, 2, 4 or 8.
        self.iSibIndexRange = randU8();
        return True;
+
+ def _calcTestFunctionName(self, oInstrTest, iInstrTest):
+ """
+ Calc a test function name for the given instruction test.
+ """
+ sName = 'TestInstr%03u_%s' % (iInstrTest, oInstrTest.sName);
+ return sName.replace(',', '_').replace(' ', '_').replace('%', '_');
+
    def _generateFileHeader(self, ):
        """
        Writes the file header.
        Raises exception on trouble.
        """
        self.write('; $Id: InstructionTestGen.py $\n'
                   ';; @file %s\n'
                   '; Autogenerate by %s %s. DO NOT EDIT\n'
                   ';\n'
                   '\n'
                   ';\n'
                   '; Headers\n'
                   ';\n'
                   '%%include "env-%s.mac"\n'
                   % ( os.path.basename(self.sFile),
                       os.path.basename(__file__), __version__[11:-1],
                       self.oTarget.sName,
                   ) );
        # Target environment specific init stuff.

        #
        # Global variables.
        #
        self.write('\n\n'
                   ';\n'
                   '; Globals\n'
                   ';\n');
        self.write('VBINSTST_BEGINDATA\n'
                   'VBINSTST_GLOBALNAME_EX g_pvLow16Mem4K, data hidden\n'
                   ' dq 0\n'
                   'VBINSTST_GLOBALNAME_EX g_pvLow32Mem4K, data hidden\n'
                   ' dq 0\n'
                   'VBINSTST_GLOBALNAME_EX g_pvMem4K, data hidden\n'
                   ' dq 0\n'
                   'VBINSTST_GLOBALNAME_EX g_uVBInsTstSubTestIndicator, data hidden\n'
                   ' dd 0\n'
                   '%ifdef VBINSTST_CAN_DO_TRAPS\n'
                   'VBINSTST_TRAP_RECS_BEGIN\n'
                   '%endif\n'
                   'VBINSTST_BEGINCODE\n'
                   );
        # 64-bit known register values live in memory so they can be compared with cmp.
        self.write('%ifdef RT_ARCH_AMD64\n');
        for i in range(len(g_asGRegs64)):
            self.write('g_u64KnownValue_%s: dq 0x%x\n' % (g_asGRegs64[i], self.au64Regs[i]));
        self.write('%endif\n\n')

        #
        # Common functions.
        #

        # Loading common values.
        self.write('\n\n'
                   'VBINSTST_BEGINPROC Common_LoadKnownValues\n'
                   '%ifdef RT_ARCH_AMD64\n');
        for i in range(len(g_asGRegs64NoSp)):
            if g_asGRegs64NoSp[i]:
                self.write(' mov %s, 0x%x\n' % (g_asGRegs64NoSp[i], self.au64Regs[i],));
        self.write('%else\n');
        for i in range(8):
            if g_asGRegs32NoSp[i]:
                self.write(' mov %s, 0x%x\n' % (g_asGRegs32NoSp[i], self.au32Regs[i],));
        self.write('%endif\n'
                   ' ret\n'
                   'VBINSTST_ENDPROC Common_LoadKnownValues\n'
                   '\n');

        # Checking that the non-SP registers still hold the known values,
        # reporting each mismatch via Common_BadValue.
        self.write('VBINSTST_BEGINPROC Common_CheckKnownValues\n'
                   '%ifdef RT_ARCH_AMD64\n');
        for i in range(len(g_asGRegs64NoSp)):
            if g_asGRegs64NoSp[i]:
                self.write(' cmp %s, [g_u64KnownValue_%s wrt rip]\n'
                           ' je .ok_%u\n'
                           ' push %u ; register number\n'
                           ' push %s ; actual\n'
                           ' push qword [g_u64KnownValue_%s wrt rip] ; expected\n'
                           ' call VBINSTST_NAME(Common_BadValue)\n'
                           '.ok_%u:\n'
                           % ( g_asGRegs64NoSp[i], g_asGRegs64NoSp[i], i, i, g_asGRegs64NoSp[i], g_asGRegs64NoSp[i], i,));
        self.write('%else\n');
        for i in range(8):
            if g_asGRegs32NoSp[i]:
                self.write(' cmp %s, 0x%x\n'
                           ' je .ok_%u\n'
                           ' push %u ; register number\n'
                           ' push %s ; actual\n'
                           ' push dword 0x%x ; expected\n'
                           ' call VBINSTST_NAME(Common_BadValue)\n'
                           '.ok_%u:\n'
                           % ( g_asGRegs32NoSp[i], self.au32Regs[i], i, i, g_asGRegs32NoSp[i], self.au32Regs[i], i,));
        self.write('%endif\n'
                   ' ret\n'
                   'VBINSTST_ENDPROC Common_CheckKnownValues\n'
                   '\n');

        return True;
+
+ def _generateMemSetupFunctions(self): # pylint: disable=R0915
+ """
+ Generates the memory setup functions.
+ """
+ cDefAddrBits = self.oTarget.getDefAddrBits();
+ for sName in self._dMemSetupFns:
+ # Unpack it.
+ asParams = sName.split('_');
+ cAddrBits = int(asParams[0][:-3]); assert asParams[0][-3:] == 'bit';
+ cEffOpBits = int(asParams[1][1:]); assert asParams[1][0] == 'U';
+ if cAddrBits == 64: asAddrGRegs = g_asGRegs64;
+ elif cAddrBits == 32: asAddrGRegs = g_asGRegs32;
+ else: asAddrGRegs = g_asGRegs16;
+
+ i = 2;
+ iBaseReg = None;
+ sBaseReg = None;
+ if i < len(asParams) and asParams[i] in asAddrGRegs:
+ sBaseReg = asParams[i];
+ iBaseReg = asAddrGRegs.index(sBaseReg);
+ i += 1
+
+ assert i < len(asParams); assert asParams[i][0] == 'x';
+            iScale = int(asParams[i][1:]); assert iScale in [1, 2, 4, 8], '%u %s' % (iScale, sName);
+ i += 1;
+
+ sIndexReg = None;
+ iIndexReg = None;
+ if i < len(asParams) and asParams[i] in asAddrGRegs:
+ sIndexReg = asParams[i];
+ iIndexReg = asAddrGRegs.index(sIndexReg);
+ i += 1;
+
+ u32Disp = None;
+ if i < len(asParams) and len(asParams[i]) == 10:
+ u32Disp = long(asParams[i], 16);
+ i += 1;
+
+ assert i == len(asParams), 'i=%d len=%d len[i]=%d (%s)' % (i, len(asParams), len(asParams[i]), asParams[i],);
+ assert iScale == 1 or iIndexReg is not None;
+
+ # Find a temporary register.
+ iTmpReg1 = X86_GREG_xCX;
+ while iTmpReg1 in [iBaseReg, iIndexReg]:
+ iTmpReg1 += 1;
+
+ # Prologue.
+ self.write('\n\n'
+ '; cAddrBits=%s cEffOpBits=%s iBaseReg=%s u32Disp=%s iIndexReg=%s iScale=%s\n'
+ 'VBINSTST_BEGINPROC Common_MemSetup_%s\n'
+ ' MY_PUSH_FLAGS\n'
+ ' push %s\n'
+ % ( cAddrBits, cEffOpBits, iBaseReg, u32Disp, iIndexReg, iScale,
+ sName, self.oTarget.asGRegs[iTmpReg1], ));
+
+ # Figure out what to use.
+ if cEffOpBits == 64:
+ sTmpReg1 = g_asGRegs64[iTmpReg1];
+ sDataVar = 'VBINSTST_NAME(g_u64Data)';
+ elif cEffOpBits == 32:
+ sTmpReg1 = g_asGRegs32[iTmpReg1];
+ sDataVar = 'VBINSTST_NAME(g_u32Data)';
+ elif cEffOpBits == 16:
+ sTmpReg1 = g_asGRegs16[iTmpReg1];
+ sDataVar = 'VBINSTST_NAME(g_u16Data)';
+ else:
+ assert cEffOpBits == 8; assert iTmpReg1 < 4;
+ sTmpReg1 = g_asGRegs8Rex[iTmpReg1];
+ sDataVar = 'VBINSTST_NAME(g_u8Data)';
+
+ # Special case: reg + reg * [2,4,8]
+ if iBaseReg == iIndexReg and iBaseReg is not None and iScale != 1:
+ iTmpReg2 = X86_GREG_xBP;
+ while iTmpReg2 in [iBaseReg, iIndexReg, iTmpReg1]:
+ iTmpReg2 += 1;
+ sTmpReg2 = self.gregNameBits(iTmpReg2, cAddrBits);
+ self.write(' push sAX\n'
+ ' push %s\n'
+ ' push sDX\n'
+ % (self.oTarget.asGRegs[iTmpReg2],));
+ if cAddrBits == 16:
+ self.write(' mov %s, [VBINSTST_NAME(g_pvLow16Mem4K) xWrtRIP]\n' % (sTmpReg2,));
+ else:
+ self.write(' mov %s, [VBINSTST_NAME(g_pvLow32Mem4K) xWrtRIP]\n' % (sTmpReg2,));
+ self.write(' add %s, 0x200\n' % (sTmpReg2,));
+ self.write(' mov %s, %s\n' % (self.gregNameBits(X86_GREG_xAX, cAddrBits), sTmpReg2,));
+ if u32Disp is not None:
+ self.write(' sub %s, %d\n'
+ % ( self.gregNameBits(X86_GREG_xAX, cAddrBits), convU32ToSigned(u32Disp), ));
+ self.write(' xor edx, edx\n'
+ '%if xCB == 2\n'
+ ' push 0\n'
+ '%endif\n');
+ self.write(' push %u\n' % (iScale + 1,));
+ self.write(' div %s [xSP]\n' % ('qword' if cAddrBits == 64 else 'dword',));
+ self.write(' sub %s, %s\n' % (sTmpReg2, self.gregNameBits(X86_GREG_xDX, cAddrBits),));
+ self.write(' pop sDX\n'
+ ' pop sDX\n'); # sTmpReg2 is eff address; sAX is sIndexReg value.
+ # Note! sTmpReg1 can be xDX and that's no problem now.
+ self.write(' mov %s, [xSP + sCB*3 + MY_PUSH_FLAGS_SIZE + xCB]\n' % (sTmpReg1,));
+ self.write(' mov [%s], %s\n' % (sTmpReg2, sTmpReg1,)); # Value in place.
+ self.write(' pop %s\n' % (self.oTarget.asGRegs[iTmpReg2],));
+ if iBaseReg == X86_GREG_xAX:
+ self.write(' pop %s\n' % (self.oTarget.asGRegs[iTmpReg1],));
+ else:
+ self.write(' mov %s, %s\n' % (sBaseReg, self.gregNameBits(X86_GREG_xAX, cAddrBits),));
+ self.write(' pop sAX\n');
+
+ else:
+ # Load the value and mem address, storing the value there.
+ # Note! ASSUMES that the scale and disposition works fine together.
+ sAddrReg = sBaseReg if sBaseReg is not None else sIndexReg;
+ self.write(' mov %s, [xSP + sCB + MY_PUSH_FLAGS_SIZE + xCB]\n' % (sTmpReg1,));
+ if cAddrBits >= cDefAddrBits:
+ self.write(' mov [%s xWrtRIP], %s\n' % (sDataVar, sTmpReg1,));
+ self.write(' lea %s, [%s xWrtRIP]\n' % (sAddrReg, sDataVar,));
+ else:
+ if cAddrBits == 16:
+ self.write(' mov %s, [VBINSTST_NAME(g_pvLow16Mem4K) xWrtRIP]\n' % (sAddrReg,));
+ else:
+ self.write(' mov %s, [VBINSTST_NAME(g_pvLow32Mem4K) xWrtRIP]\n' % (sAddrReg,));
+ self.write(' add %s, %s\n' % (sAddrReg, (randU16() << cEffOpBits) & 0xfff, ));
+ self.write(' mov [%s], %s\n' % (sAddrReg, sTmpReg1, ));
+
+ # Adjust for disposition and scaling.
+ if u32Disp is not None:
+ self.write(' sub %s, %d\n' % ( sAddrReg, convU32ToSigned(u32Disp), ));
+ if iIndexReg is not None:
+ if iBaseReg == iIndexReg:
+ assert iScale == 1;
+ assert u32Disp is None or (u32Disp & 1) == 0;
+ self.write(' shr %s, 1\n' % (sIndexReg,));
+ elif sBaseReg is not None:
+ uIdxRegVal = randUxx(cAddrBits);
+ if cAddrBits == 64:
+ self.write(' mov %s, %u\n'
+ ' sub %s, %s\n'
+ ' mov %s, %u\n'
+ % ( sIndexReg, (uIdxRegVal * iScale) & UINT64_MAX,
+ sBaseReg, sIndexReg,
+ sIndexReg, uIdxRegVal, ));
+ else:
+ assert cAddrBits == 32;
+ self.write(' mov %s, %u\n'
+ ' sub %s, %#06x\n'
+ % ( sIndexReg, uIdxRegVal, sBaseReg, (uIdxRegVal * iScale) & UINT32_MAX, ));
+ elif iScale == 2:
+ assert u32Disp is None or (u32Disp & 1) == 0;
+ self.write(' shr %s, 1\n' % (sIndexReg,));
+ elif iScale == 4:
+ assert u32Disp is None or (u32Disp & 3) == 0;
+ self.write(' shr %s, 2\n' % (sIndexReg,));
+ elif iScale == 8:
+ assert u32Disp is None or (u32Disp & 7) == 0;
+ self.write(' shr %s, 3\n' % (sIndexReg,));
+ else:
+ assert iScale == 1;
+
+ # Set upper bits that's supposed to be unused.
+ if cDefAddrBits > cAddrBits or cAddrBits == 16:
+ if cDefAddrBits == 64:
+ assert cAddrBits == 32;
+ if iBaseReg is not None:
+ self.write(' mov %s, %#018x\n'
+ ' or %s, %s\n'
+ % ( g_asGRegs64[iTmpReg1], randU64() & 0xffffffff00000000,
+ g_asGRegs64[iBaseReg], g_asGRegs64[iTmpReg1],));
+ if iIndexReg is not None and iIndexReg != iBaseReg:
+ self.write(' mov %s, %#018x\n'
+ ' or %s, %s\n'
+ % ( g_asGRegs64[iTmpReg1], randU64() & 0xffffffff00000000,
+ g_asGRegs64[iIndexReg], g_asGRegs64[iTmpReg1],));
+ else:
+ assert cDefAddrBits == 32; assert cAddrBits == 16; assert iIndexReg is None;
+ if iBaseReg is not None:
+ self.write(' or %s, %#010x\n'
+ % ( g_asGRegs32[iBaseReg], randU32() & 0xffff0000, ));
+
+ # Epilogue.
+ self.write(' pop %s\n'
+ ' MY_POP_FLAGS\n'
+ ' ret sCB\n'
+ 'VBINSTST_ENDPROC Common_MemSetup_%s\n'
+ % ( self.oTarget.asGRegs[iTmpReg1], sName,));
+
+
+ def _generateFileFooter(self):
+ """
+ Generates file footer.
+ """
+
+ # Terminate the trap records.
+ self.write('\n\n'
+ ';\n'
+ '; Terminate the trap records\n'
+ ';\n'
+ 'VBINSTST_BEGINDATA\n'
+ '%ifdef VBINSTST_CAN_DO_TRAPS\n'
+ 'VBINSTST_TRAP_RECS_END\n'
+ '%endif\n'
+ 'VBINSTST_BEGINCODE\n');
+
+ # Register checking functions.
+ for sName in self._dCheckFns:
+ asRegs = sName.split('_');
+ sPushSize = 'dword';
+
+ # Do we check eflags first.
+ if asRegs[0] == 'eflags':
+ asRegs.pop(0);
+ sFlagsToCheck = asRegs.pop(0);
+ self.write('\n\n'
+ '; Check flags and then defers to the register-only checker\n'
+                       '; To save space, the callee cleans up the stack.\n'
+ '; Ref count: %u\n'
+ 'VBINSTST_BEGINPROC %s%s\n'
+ ' MY_PUSH_FLAGS\n'
+ ' push sAX\n'
+ ' mov sAX, [xSP + sCB]\n'
+ ' and sAX, %s\n'
+ ' cmp sAX, [xSP + xCB + sCB*2]\n'
+ ' je .equal\n'
+ % ( self._dCheckFns[sName], self.ksCheckerPrefix, sName,
+ sFlagsToCheck,));
+ self.write(' push dword 0xef ; register number\n'
+ ' push sAX ; actual\n'
+ ' mov sAX, [xSP + xCB + sCB*4]\n'
+ ' push sAX ; expected\n'
+ ' call VBINSTST_NAME(Common_BadValue)\n');
+ self.write('.equal:\n'
+ ' mov xAX, [xSP + sCB*2]\n' # Remove the expected eflags value from the stack frame.
+ ' mov [xSP + sCB*2 + xCB + sCB - xCB], xAX\n'
+ ' pop sAX\n'
+ ' MY_POP_FLAGS\n'
+ ' lea xSP, [xSP + sCB]\n'
+ ' jmp VBINSTST_NAME(Common_Check_%s)\n'
+ 'VBINSTST_ENDPROC %s%s\n'
+ % ( '_'.join(asRegs),
+ self.ksCheckerPrefix, sName,) );
+ else:
+ # Prologue
+ self.write('\n\n'
+ '; Checks 1 or more register values, expected values pushed on the stack.\n'
+                           '; To save space, the callee cleans up the stack.\n'
+ '; Ref count: %u\n'
+ 'VBINSTST_BEGINPROC %s%s\n'
+ ' MY_PUSH_FLAGS\n'
+ % ( self._dCheckFns[sName], self.ksCheckerPrefix, sName, ) );
+
+ # Register checks.
+ for i in range(len(asRegs)):
+ sReg = asRegs[i];
+ iReg = self.oTarget.asGRegs.index(sReg);
+ if i == asRegs.index(sReg): # Only check once, i.e. input = output reg.
+ self.write(' cmp %s, [xSP + MY_PUSH_FLAGS_SIZE + xCB + sCB * %u]\n'
+ ' je .equal%u\n'
+ ' push %s %u ; register number\n'
+ ' push %s ; actual\n'
+ ' mov %s, [xSP + sCB*2 + MY_PUSH_FLAGS_SIZE + xCB + sCB * %u]\n'
+ ' push %s ; expected\n'
+ ' call VBINSTST_NAME(Common_BadValue)\n'
+ '.equal%u:\n'
+ % ( sReg, i, i, sPushSize, iReg, sReg, sReg, i, sReg, i, ) );
+
+
+ # Restore known register values and check the other registers.
+ for sReg in asRegs:
+ if self.oTarget.is64Bit():
+ self.write(' mov %s, [g_u64KnownValue_%s wrt rip]\n' % (sReg, sReg,));
+ else:
+ iReg = self.oTarget.asGRegs.index(sReg)
+ self.write(' mov %s, 0x%x\n' % (sReg, self.au32Regs[iReg],));
+ self.write(' MY_POP_FLAGS\n'
+ ' call VBINSTST_NAME(Common_CheckKnownValues)\n'
+ ' ret sCB*%u\n'
+ 'VBINSTST_ENDPROC %s%s\n'
+ % (len(asRegs), self.ksCheckerPrefix, sName,));
+
+ # memory setup functions
+ self._generateMemSetupFunctions();
+
+ # 64-bit constants.
+ if len(self._d64BitConsts) > 0:
+ self.write('\n\n'
+ ';\n'
+ '; 64-bit constants\n'
+ ';\n');
+ for uVal in self._d64BitConsts:
+ self.write('g_u64Const_0x%016x: dq 0x%016x ; Ref count: %d\n' % (uVal, uVal, self._d64BitConsts[uVal], ) );
+
+ return True;
+
+ def _generateTests(self):
+ """
+ Generate the test cases.
+ """
+ for self.iFile in range(self.cFiles):
+ if self.cFiles == 1:
+ self.sFile = '%s.asm' % (self.oOptions.sOutputBase,)
+ else:
+ self.sFile = '%s-%u.asm' % (self.oOptions.sOutputBase, self.iFile)
+ self.oFile = sys.stdout;
+ if self.oOptions.sOutputBase != '-':
+ self.oFile = io.open(self.sFile, 'w', buffering = 65536, encoding = 'utf-8');
+
+ self._generateFileHeader();
+
+ # Calc the range.
+ iInstrTestStart = self.iFile * self.oOptions.cInstrPerFile;
+ iInstrTestEnd = iInstrTestStart + self.oOptions.cInstrPerFile;
+ if iInstrTestEnd > len(g_aoInstructionTests):
+ iInstrTestEnd = len(g_aoInstructionTests);
+
+ # Generate the instruction tests.
+ for iInstrTest in range(iInstrTestStart, iInstrTestEnd):
+ oInstrTest = g_aoInstructionTests[iInstrTest];
+ if oInstrTest.isApplicable(self):
+ self.write('\n'
+ '\n'
+ ';\n'
+ '; %s\n'
+ ';\n'
+ % (oInstrTest.sName,));
+ self._randInitIndexes();
+ oInstrTest.generateTest(self, self._calcTestFunctionName(oInstrTest, iInstrTest));
+
+ # Generate the main function.
+ self.write('\n\n'
+ 'VBINSTST_BEGINPROC TestInstrMain\n'
+ ' MY_PUSH_ALL\n'
+ ' sub xSP, 40h\n'
+ '%ifdef VBINSTST_CAN_DO_TRAPS\n'
+ ' VBINSTST_TRAP_RECS_INSTALL\n'
+ '%endif\n'
+ '\n');
+
+ for iInstrTest in range(iInstrTestStart, iInstrTestEnd):
+ oInstrTest = g_aoInstructionTests[iInstrTest];
+ if oInstrTest.isApplicable(self):
+ self.write('%%ifdef ASM_CALL64_GCC\n'
+ ' lea rdi, [.szInstr%03u wrt rip]\n'
+ '%%elifdef ASM_CALL64_MSC\n'
+ ' lea rcx, [.szInstr%03u wrt rip]\n'
+ '%%else\n'
+ ' mov xAX, .szInstr%03u\n'
+ ' mov [xSP], xAX\n'
+ '%%endif\n'
+ ' VBINSTST_CALL_FN_SUB_TEST\n'
+ ' call VBINSTST_NAME(%s)\n'
+ % ( iInstrTest, iInstrTest, iInstrTest, self._calcTestFunctionName(oInstrTest, iInstrTest)));
+
+ self.write('\n'
+ '%ifdef VBINSTST_CAN_DO_TRAPS\n'
+ ' VBINSTST_TRAP_RECS_UNINSTALL\n'
+ '%endif\n'
+ ' add xSP, 40h\n'
+ ' MY_POP_ALL\n'
+ ' ret\n\n');
+ for iInstrTest in range(iInstrTestStart, iInstrTestEnd):
+ self.write('.szInstr%03u: db \'%s\', 0\n' % (iInstrTest, g_aoInstructionTests[iInstrTest].sName,));
+ self.write('VBINSTST_ENDPROC TestInstrMain\n\n');
+
+ self._generateFileFooter();
+ if self.oOptions.sOutputBase != '-':
+ self.oFile.close();
+ self.oFile = None;
+ self.sFile = '';
+
+ return RTEXITCODE_SUCCESS;
+
+ def _runMakefileMode(self):
+ """
+ Generate a list of output files on standard output.
+ """
+ if self.cFiles == 1:
+ print('%s.asm' % (self.oOptions.sOutputBase,));
+ else:
+ print(' '.join('%s-%s.asm' % (self.oOptions.sOutputBase, i) for i in range(self.cFiles)));
+ return RTEXITCODE_SUCCESS;
+
+ def run(self):
+ """
+ Generates the tests or whatever is required.
+ """
+ if self.oOptions.fMakefileMode:
+ return self._runMakefileMode();
+ sys.stderr.write('InstructionTestGen.py: Seed = %s\n' % (g_iMyRandSeed,));
+ return self._generateTests();
+
+ @staticmethod
+ def main():
+ """
+ Main function a la C/C++. Returns exit code.
+ """
+
+ #
+ # Parse the command line.
+ #
+ oParser = OptionParser(version = __version__[11:-1].strip());
+ oParser.add_option('--makefile-mode', dest = 'fMakefileMode', action = 'store_true', default = False,
+ help = 'Special mode for use to output a list of output files for the benefit of '
+ 'the make program (kmk).');
+ oParser.add_option('--split', dest = 'cInstrPerFile', metavar = '<instr-per-file>', type = 'int', default = 9999999,
+ help = 'Number of instruction to test per output file.');
+ oParser.add_option('--output-base', dest = 'sOutputBase', metavar = '<file>', default = None,
+ help = 'The output file base name, no suffix please. Required.');
+ oParser.add_option('--target', dest = 'sTargetEnv', metavar = '<target>',
+ default = 'iprt-r3-32',
+                           choices = sorted(g_dTargetEnvs.keys()),
+ help = 'The target environment. Choices: %s'
+ % (', '.join(sorted(g_dTargetEnvs.keys())),));
+ oParser.add_option('--test-size', dest = 'sTestSize', default = InstructionTestGen.ksTestSize_Medium,
+ choices = InstructionTestGen.kasTestSizes,
+ help = 'Selects the test size.');
+
+ (oOptions, asArgs) = oParser.parse_args();
+ if len(asArgs) > 0:
+ oParser.print_help();
+ return RTEXITCODE_SYNTAX
+ if oOptions.sOutputBase is None:
+ print('syntax error: Missing required option --output-base.', file = sys.stderr);
+ return RTEXITCODE_SYNTAX
+
+ #
+ # Instantiate the program class and run it.
+ #
+ oProgram = InstructionTestGen(oOptions);
+ return oProgram.run();
+
+
+if __name__ == '__main__':
+ sys.exit(InstructionTestGen.main());
+
diff --git a/src/VBox/VMM/testcase/Instructions/Makefile.kmk b/src/VBox/VMM/testcase/Instructions/Makefile.kmk
new file mode 100644
index 00000000..8b34f8cd
--- /dev/null
+++ b/src/VBox/VMM/testcase/Instructions/Makefile.kmk
@@ -0,0 +1,69 @@
+# $Id: Makefile.kmk $
+## @file
+# Sub-Makefile for the X86 and AMD64 Instruction Tests.
+#
+
+#
+# Copyright (C) 2006-2020 Oracle Corporation
+#
+# This file is part of VirtualBox Open Source Edition (OSE), as
+# available from http://www.virtualbox.org. This file is free software;
+# you can redistribute it and/or modify it under the terms of the GNU
+# General Public License (GPL) as published by the Free Software
+# Foundation, in version 2 as it comes in the "COPYING" file of the
+# VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+# hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+#
+
+SUB_DEPTH = ../../../../..
+include $(KBUILD_PATH)/subheader.kmk
+
+#
+# Python linting (can't live without pylint!).
+#
+ifdef VBOX_WITH_PYLINT
+TESTING +=
+endif
+BLDDIRS += $(PATH_TARGET)/pylint
+
+define def_vbox_instructions_py_check
+$(eval name:=$(basename $(notdir $(py))))
+
+pylint: $(name)-py-phony.o
+$(name).o: $(name)-py-phony.o
+$(PATH_TARGET)/pylint/$(name).o $(name)-py-phony.o:: $(py) | $(PATH_TARGET)/pylint/
+ifdef VBOX_WITH_PYLINT
+ $(QUIET2)$(call MSG_L1,Subjecting $(py) to pylint...)
+ $(QUIET)$(REDIRECT_EXT) -E LC_ALL=C -E PYTHONPATH="$(dir $(py))" -C $(dir $(py)) \
+ -- $$(VBOX_PYLINT) $$(VBOX_PYLINT_FLAGS) $$($(py)_VBOX_PYLINT_FLAGS) ./$(notdir $(py))
+endif
+ $(QUIET)$(APPEND) -t "$(PATH_TARGET)/pylint/$(name).o"
+
+TESTING += $(name)-py-phony.o
+endef # def_vbox_instructions_py_check
+
+
+$(foreach py, $(addprefix $(PATH_SUB_CURRENT)/, InstructionTestGen.py ) , $(eval $(def_vbox_instructions_py_check)))
+
+
+
+#
+# Ring-3 test program based on IPRT.
+#
+PROGRAMS += tstVBInsTstR3
+tstVBInsTstR3_TEMPLATE = VBOXR3TSTEXE
+tstVBInsTstR3_INCS = .
+tstVBInsTstR3_SOURCES = \
+ tstVBInsTstR3.cpp \
+ $(tstVBInsTstR3_0_OUTDIR)/tstVBInsTstR3A.asm
+tstVBInsTstR3_CLEAN = \
+ $(tstVBInsTstR3_0_OUTDIR)/tstVBInsTstR3A.asm
+
+$$(tstVBInsTstR3_0_OUTDIR)/tstVBInsTstR3A.asm: $(PATH_SUB_CURRENT)/InstructionTestGen.py
+ $(VBOX_BLD_PYTHON) $(PATH_SUB_CURRENT)/InstructionTestGen.py \
+ --target iprt-r3-$(if-expr $(intersects $(KBUILD_TARGET_ARCH), $(KBUILD_ARCHES_64)),64,32) \
+ --output-base $(basename $@)
+
+
+include $(FILE_KBUILD_SUB_FOOTER)
+
diff --git a/src/VBox/VMM/testcase/Instructions/env-bs2-r0-32-big.mac b/src/VBox/VMM/testcase/Instructions/env-bs2-r0-32-big.mac
new file mode 100644
index 00000000..7cc3a416
--- /dev/null
+++ b/src/VBox/VMM/testcase/Instructions/env-bs2-r0-32-big.mac
@@ -0,0 +1,35 @@
+; $Id: env-bs2-r0-32-big.mac $
+;; @file
+; Instruction Test Environment - Big Boot Sector Type 2, Ring-0, 32-Bit.
+;
+
+;
+; Copyright (C) 2006-2020 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+%undef RT_ARCH_AMD64
+%undef RT_ARCH_X86
+%undef RT_ARCH_X86_32
+%undef RT_ARCH_X86_16
+%undef ASM_CALL64_MSC
+%undef ASM_CALL64_GCC
+%undef ASM_CALL64_BS2
+%undef ARCH_BITS
+%undef xWrtRIP
+
+%define ARCH_BITS 32
+%define RT_ARCH_X86
+%define ASM_CALL32_BS2
+%define xWrtRIP
+%define RTCCPTR_PRE dword
+
+%include "env-bs2-r0-big.mac"
+
diff --git a/src/VBox/VMM/testcase/Instructions/env-bs2-r0-64-big.mac b/src/VBox/VMM/testcase/Instructions/env-bs2-r0-64-big.mac
new file mode 100644
index 00000000..c2f4275c
--- /dev/null
+++ b/src/VBox/VMM/testcase/Instructions/env-bs2-r0-64-big.mac
@@ -0,0 +1,35 @@
+; $Id: env-bs2-r0-64-big.mac $
+;; @file
+; Instruction Test Environment - Big Boot Sector Type 2, Ring-0, 64-Bit.
+;
+
+;
+; Copyright (C) 2006-2020 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+%undef RT_ARCH_AMD64
+%undef RT_ARCH_X86
+%undef RT_ARCH_X86_32
+%undef RT_ARCH_X86_16
+%undef ASM_CALL64_MSC
+%undef ASM_CALL64_GCC
+%undef ASM_CALL64_BS2
+%undef ARCH_BITS
+%undef xWrtRIP
+
+%define ARCH_BITS 64
+%define RT_ARCH_AMD64
+%define ASM_CALL64_BS2
+%define xWrtRIP wrt rip
+%define RTCCPTR_PRE qword
+
+%include "env-bs2-r0-big.mac"
+
diff --git a/src/VBox/VMM/testcase/Instructions/env-bs2-r0-64.mac b/src/VBox/VMM/testcase/Instructions/env-bs2-r0-64.mac
new file mode 100644
index 00000000..ca54c801
--- /dev/null
+++ b/src/VBox/VMM/testcase/Instructions/env-bs2-r0-64.mac
@@ -0,0 +1,35 @@
+; $Id: env-bs2-r0-64.mac $
+;; @file
+; Instruction Test Environment - Boot Sector Type 2, Ring-0, 64-Bit.
+;
+
+;
+; Copyright (C) 2006-2020 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+%undef RT_ARCH_AMD64
+%undef RT_ARCH_X86
+%undef RT_ARCH_X86_32
+%undef RT_ARCH_X86_16
+%undef ASM_CALL64_MSC
+%undef ASM_CALL64_GCC
+%undef ASM_CALL64_BS2
+%undef ARCH_BITS
+%undef xWrtRIP
+
+%define ARCH_BITS 64
+%define RT_ARCH_AMD64
+%define ASM_CALL64_BS2
+%define xWrtRIP wrt rip
+%define RTCCPTR_PRE qword
+
+%include "env-bs2-r0.mac"
+
diff --git a/src/VBox/VMM/testcase/Instructions/env-bs2-r0-big.mac b/src/VBox/VMM/testcase/Instructions/env-bs2-r0-big.mac
new file mode 100644
index 00000000..caa3b8d5
--- /dev/null
+++ b/src/VBox/VMM/testcase/Instructions/env-bs2-r0-big.mac
@@ -0,0 +1,57 @@
+; $Id: env-bs2-r0-big.mac $
+;; @file
+; Instruction Test Environment - Big Boot Sector Type 2, Ring-0.
+;
+
+;
+; Copyright (C) 2006-2020 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+%ifndef ___env_bs2_r0_big_mac
+%define ___env_bs2_r0_big_mac
+
+;
+; Include the BS2 API for BIG images.
+;
+%include "bootsector2-api.mac"
+
+
+;; Call RTTestISub like function.
+%define VBINSTST_CALL_FN_SUB_TEST call [TMPL_NM_CMN(g_pfnTestSub) xWrtRIP]
+
+;; Call RTTestIFailure like function with simple message.
+%define VBINSTST_CALL_FN_FAILURE call [TMPL_NM_CMN(g_pfnTestFailedF) xWrtRIP]
+
+;; Call RTTestIFailure like function with format message + 1 arg.
+%define VBINSTST_CALL_FN_FAILURE_1 call [TMPL_NM_CMN(g_pfnTestFailedF) xWrtRIP]
+
+;; Call RTTestIFailure like function with format message + 2 args.
+%define VBINSTST_CALL_FN_FAILURE_2 call [TMPL_NM_CMN(g_pfnTestFailedF) xWrtRIP]
+
+;; Call RTTestIFailure like function with format message + 3 args.
+%define VBINSTST_CALL_FN_FAILURE_3 call [TMPL_NM_CMN(g_pfnTestFailedF) xWrtRIP]
+
+;; Call RTTestIFailure like function with format message + 4 args.
+%define VBINSTST_CALL_FN_FAILURE_4 call [TMPL_NM_CMN(g_pfnTestFailedF) xWrtRIP]
+
+;; The image base label (used by the trap macros).
+%define VBINSTST_IMAGE_BASE_LABLE bs2_big_image_start
+
+;; Wrapper for calling TestInstallTrapRecs (used by the trap macros).
+%define VBINSTST_CALL_TEST_INSTALL_TRAP_RECS call [TMPL_NM_CMN(g_pfnTestInstallTrapRecs) xWrtRIP]
+
+;
+; Include the common bits (contains code using above macros)
+;
+%include "env-bs2-r0-common.mac"
+
+%endif
+
diff --git a/src/VBox/VMM/testcase/Instructions/env-bs2-r0-common.mac b/src/VBox/VMM/testcase/Instructions/env-bs2-r0-common.mac
new file mode 100644
index 00000000..3b3bcf2d
--- /dev/null
+++ b/src/VBox/VMM/testcase/Instructions/env-bs2-r0-common.mac
@@ -0,0 +1,115 @@
+; $Id: env-bs2-r0-common.mac $
+;; @file
+; Instruction Test Environment - Boot Sector Type 2, Ring-0.
+;
+
+;
+; Copyright (C) 2006-2020 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+%ifndef ___env_bs2_r0_common_mac
+%define ___env_bs2_r0_common_mac
+
+
+;; Same as BEGINPROC in asmdefs.mac.
+%macro VBINSTST_BEGINPROC 1
+VBINSTST_GLOBALNAME_EX %1, function hidden
+%endm
+
+;; Same as ENDPROC in asmdefs.mac.
+%macro VBINSTST_ENDPROC 1,
+VBINSTST_GLOBALNAME_EX %1 %+ _EndProc, function hidden
+%endm
+
+;; Same as NAME in asmdefs.mac.
+%define VBINSTST_NAME(a_Name) TMPL_NM(a_Name)
+
+;; Same as GLOBALNAME_EX in asmdefs.mac.
+%macro VBINSTST_GLOBALNAME_EX 2,
+VBINSTST_NAME(%1):
+%endmacro
+
+;; Same as BEGINCODE in asmdefs.mac.
+%macro VBINSTST_BEGINCODE 0,
+BEGINCODE
+%endmacro
+
+;; Same as BEGINDATA in asmdefs.mac.
+%macro VBINSTST_BEGINDATA 0,
+BEGINDATA
+%endmacro
+
+
+;
+; Trap related macros.
+;
+%define VBINSTST_CAN_DO_TRAPS 1
+
+%macro VBINSTST_TRAP_INSTR 3+,
+ section .traprecs
+ istruc BS2TRAPREC
+ at BS2TRAPREC.offWhere, dd (%%trapinstr - VBINSTST_IMAGE_BASE_LABLE)
+ at BS2TRAPREC.offResumeAddend, db (%%resume - %%trapinstr)
+ at BS2TRAPREC.u8TrapNo, db %1
+ at BS2TRAPREC.u16ErrCd, dw %2
+ iend
+ VBINSTST_BEGINCODE
+ %if %1 != X86_XCPT_BP
+ %%trapinstr:
+ %3
+ %else
+ %3
+ %%trapinstr:
+ %endif
+ call VBINSTST_NAME(Common_MissingTrap_ %+ %1)
+ %%resume:
+%endmacro
+
+%macro VBINSTST_TRAP_RECS_BEGIN 0,
+ VBINSTST_BEGINDATA
+ section .traprecs progbits valign=8 vfollows=.data align=8 follows=.data
+ dq 0ffffffffeeeeeeeeh
+ dq 0ddddddddcccccccch
+VBINSTST_GLOBALNAME_EX g_aTrapRecs, hidden
+ VBINSTST_BEGINCODE
+%endmacro
+
+%macro VBINSTST_TRAP_RECS_END 0,
+ section .traprecs
+VBINSTST_GLOBALNAME_EX g_aTrapRecsEnd, hidden
+ dq 0ddddddddcccccccch
+ dq 0ffffffffeeeeeeeeh
+ VBINSTST_BEGINCODE
+%endmacro
+
+%macro VBINSTST_TRAP_RECS_INSTALL 0,
+ mov sAX, VBINSTST_NAME(g_aTrapRecs)
+ mov edx, VBINSTST_NAME(g_aTrapRecsEnd) - VBINSTST_NAME(g_aTrapRecs)
+ shr edx, BS2TRAPREC_SIZE_SHIFT
+ mov sCX, VBINSTST_IMAGE_BASE_LABLE
+ VBINSTST_CALL_TEST_INSTALL_TRAP_RECS
+%endmacro
+
+%macro VBINSTST_TRAP_RECS_UNINSTALL 0,
+ xor sAX, sAX
+ xor edx, edx
+ xor sCX, sCX
+ VBINSTST_CALL_TEST_INSTALL_TRAP_RECS
+%endmacro
+
+
+;
+; Include the common bits (contains code using above macros)
+;
+%include "env-common.mac"
+
+%endif
+
diff --git a/src/VBox/VMM/testcase/Instructions/env-bs2-r0.mac b/src/VBox/VMM/testcase/Instructions/env-bs2-r0.mac
new file mode 100644
index 00000000..202754d0
--- /dev/null
+++ b/src/VBox/VMM/testcase/Instructions/env-bs2-r0.mac
@@ -0,0 +1,53 @@
+; $Id: env-bs2-r0.mac $
+;; @file
+; Instruction Test Environment - Boot Sector Type 2, Ring-0.
+;
+
+;
+; Copyright (C) 2006-2020 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+%ifndef ___env_bs2_r0_mac
+%define ___env_bs2_r0_mac
+
+
+;; Call RTTestISub like function.
+%define VBINSTST_CALL_FN_SUB_TEST call TMPL_NM_CMN(TestSub)
+
+;; Call RTTestIFailure like function with simple message.
+%define VBINSTST_CALL_FN_FAILURE call TMPL_NM_CMN(TestFailedF)
+
+;; Call RTTestIFailure like function with format message + 1 arg.
+%define VBINSTST_CALL_FN_FAILURE_1 call TMPL_NM_CMN(TestFailedF)
+
+;; Call RTTestIFailure like function with format message + 2 args.
+%define VBINSTST_CALL_FN_FAILURE_2 call TMPL_NM_CMN(TestFailedF)
+
+;; Call RTTestIFailure like function with format message + 3 args.
+%define VBINSTST_CALL_FN_FAILURE_3 call TMPL_NM_CMN(TestFailedF)
+
+;; Call RTTestIFailure like function with format message + 4 args.
+%define VBINSTST_CALL_FN_FAILURE_4 call TMPL_NM_CMN(TestFailedF)
+
+;; The image base label (used by the trap macros).
+%define VBINSTST_IMAGE_BASE_LABLE start
+
+;; Wrapper for calling TestInstallTrapRecs (used by the trap macros).
+%define VBINSTST_CALL_TEST_INSTALL_TRAP_RECS call TMPL_NM_CMN(TestInstallTrapRecs)
+
+
+;
+; Include the common bits (contains code using above macros)
+;
+%include "env-bs2-r0-common.mac"
+
+%endif
+
diff --git a/src/VBox/VMM/testcase/Instructions/env-common.mac b/src/VBox/VMM/testcase/Instructions/env-common.mac
new file mode 100644
index 00000000..12879f58
--- /dev/null
+++ b/src/VBox/VMM/testcase/Instructions/env-common.mac
@@ -0,0 +1,346 @@
+; $Id: env-common.mac $
+;; @file
+; Instruction Test Environment - Common Bits.
+;
+
+;
+; Copyright (C) 2006-2020 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+%ifndef ___env_common_mac
+%define ___env_common_mac
+
+%include "iprt/x86.mac"
+
+;*******************************************************************************
+;* Defined Constants And Macros *
+;*******************************************************************************
+%ifdef RT_ARCH_AMD64
+ %define MY_PUSH_FLAGS pushfq
+ %define MY_POP_FLAGS popfq
+ %define MY_PUSH_FLAGS_SIZE 8
+
+ ;; Saves all general purpose registers and the flags.  Also establishes rbp
+ ;; as a frame pointer so the caller's stack arguments remain addressable at
+ ;; fixed rbp offsets (see Common_BadValue / Common_MissingTrap below).
+ %macro MY_PUSH_ALL 0
+ push rbp
+ mov rbp, rsp
+ push rax
+ push rbx
+ push rcx
+ push rdx
+ push rsi
+ push rdi
+ push r8
+ push r9
+ push r10
+ push r11
+ push r12
+ push r13
+ push r14
+ push r15
+ pushfq
+ %endm
+ ;; Restores exactly what MY_PUSH_ALL saved, in reverse order.
+ %macro MY_POP_ALL 0
+ popfq
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+ pop r11
+ pop r10
+ pop r9
+ pop r8
+ pop rdi
+ pop rsi
+ pop rdx
+ pop rcx
+ pop rbx
+ pop rax
+ pop rbp
+ %endm
+
+%else
+ %define MY_PUSH_FLAGS pushfd
+ %define MY_POP_FLAGS popfd
+ %define MY_PUSH_FLAGS_SIZE 4
+
+ ;; 32-bit variant of the above.  Note! Mixes the plain 'eBP' spelling with
+ ;; the asmdefs-style 'xBP' alias; NASM register names are case-insensitive
+ ;; and xBP presumably expands to ebp here - cosmetic only, confirm.
+ %macro MY_PUSH_ALL 0
+ push eBP
+ mov xBP, xSP
+ push eax
+ push ebx
+ push ecx
+ push edx
+ push esi
+ push edi
+ pushfd
+ %endm
+ ;; Restores exactly what the 32-bit MY_PUSH_ALL saved, in reverse order.
+ %macro MY_POP_ALL 0
+ popfd
+ pop edi
+ pop esi
+ pop edx
+ pop ecx
+ pop ebx
+ pop eax
+ pop ebp
+ %endm
+%endif
+
+
+
+;*******************************************************************************
+;* Internal Functions *
+;*******************************************************************************
+
+VBINSTST_BEGINCODE
+
+;;
+; Report a bad register value via the environment's VBINSTST_CALL_FN_FAILURE_4
+; callback.
+;
+; Primary purpose is save all registers and convert from our stack-based to
+; the correct calling convention for the environment.
+;
+; This function will clean up the stack upon return (to save space in the
+; caller) - see the 'ret 3*sCB' below.
+;
+; @param uExpected Pushed last, i.e. nearest the return address.
+; @param uActual Pushed second.
+; @param uRegisterNo Pushed first (deepest on the stack).
+;
+; The global g_uVBInsTstSubTestIndicator supplies the '(line ...)' value in
+; the failure message.
+;
+VBINSTST_BEGINPROC Common_BadValue
+ MY_PUSH_ALL ; also establishes xBP for reaching the stack arguments
+ mov xAX, xSP ; 16-byte align the stack and reserve space for arguments and stuff.
+ sub xSP, 40h
+ and xSP, ~15
+ mov [xSP + 38h], xAX ; save the unaligned stack pointer for restoring below
+
+%ifdef ASM_CALL64_GCC
+ mov r8d, [VBINSTST_NAME(g_uVBInsTstSubTestIndicator) wrt rip]
+ mov rcx, [rbp + 10h] ; expected
+ mov rdx, [rbp + 18h] ; actual
+ mov rsi, [rbp + 20h] ; reg#
+ lea rdi, [.szFmt wrt rip]
+ VBINSTST_CALL_FN_FAILURE_4
+
+%elifdef ASM_CALL64_MSC
+ mov r10d, [VBINSTST_NAME(g_uVBInsTstSubTestIndicator) wrt rip]
+ mov [rsp + 20h], r10 ; 5th argument goes on the stack (beyond the 4 register args)
+ mov r9, [rbp + 10h] ; expected
+ mov r8, [rbp + 18h] ; actual
+ mov rdx, [rbp + 20h] ; reg#
+ lea rcx, [.szFmt wrt rip]
+ VBINSTST_CALL_FN_FAILURE_4
+
+%elifdef ASM_CALL64_BS2
+ mov sBX, [VBINSTST_NAME(g_uVBInsTstSubTestIndicator) xWrtRIP]
+ mov sCX, [xBP + xCB + xCB] ; expected
+ mov sAX, [xBP + xCB + xCB + sCB*1] ; actual
+ mov sDX, [xBP + xCB + xCB + sCB*2] ; reg#
+ lea sSI, [.szFmt xWrtRIP]
+ mov qword [xSP + xCB + 3*sCB], sBX
+ mov qword [xSP + xCB + 2*sCB], sCX
+ mov qword [xSP + xCB + 1*sCB], sAX
+ mov qword [xSP + xCB], sDX
+ mov [xSP], sSI
+ VBINSTST_CALL_FN_FAILURE_4
+
+%else
+ ; Generic case: all arguments are passed on the stack.
+ mov sBX, [VBINSTST_NAME(g_uVBInsTstSubTestIndicator)]
+ mov sCX, [xBP + xCB + xCB] ; expected
+ mov sAX, [xBP + xCB + xCB + sCB*1] ; actual
+ mov sDX, [xBP + xCB + xCB + sCB*2] ; reg#
+ mov [xSP + xCB + 3*sCB], sBX
+ mov [xSP + xCB + 2*sCB], sCX
+ mov [xSP + xCB + 1*sCB], sAX
+ mov [xSP + xCB], sDX
+ mov [xSP], RTCCPTR_PRE .szFmt
+ VBINSTST_CALL_FN_FAILURE_4
+%endif
+
+ mov xSP, [xSP + 38h] ; restore the original (unaligned) stack pointer
+ MY_POP_ALL
+ ret 3*sCB ; pop the three parameters on behalf of the caller
+%if ARCH_BITS == 64
+.szFmt: db 'Bad register 0x%RX32 value 0x%RX64, expected 0x%RX64 (line %RU64)', 13, 0
+%else
+.szFmt: db 'Bad register 0x%RX32 value 0x%RX32, expected 0x%RX32 (line %RU32)', 13, 0
+%endif
+VBINSTST_ENDPROC Common_BadValue
+
+
+%ifdef VBINSTST_CAN_DO_TRAPS
+
+;;
+; Report a missing TRAP.
+;
+; Primary purpose is save all registers and convert from our stack-based to
+; the correct calling convention for the environment.
+;
+; This function will clean up the stack upon return (to save space in the
+; caller) - see the 'ret 1*sCB' below.
+;
+; @param uExpected The expected trap/exception number (reported as %RX8).
+;
+VBINSTST_BEGINPROC Common_MissingTrap
+ MY_PUSH_ALL ; also establishes xBP for reaching the stack argument
+ mov xAX, xSP ; 16-byte align the stack and reserve space for arguments and stuff.
+ sub xSP, 40h
+ and xSP, ~15
+ mov [xSP + 38h], xAX ; save the unaligned stack pointer for restoring below
+
+ %ifdef ASM_CALL64_GCC
+ mov rdx, [VBINSTST_NAME(g_uVBInsTstSubTestIndicator) wrt rip]
+ movzx rsi, byte [rbp + 10h] ; expected
+ lea rdi, [.szFmt wrt rip]
+ VBINSTST_CALL_FN_FAILURE_2
+
+ %elifdef ASM_CALL64_MSC
+ mov r8d, [VBINSTST_NAME(g_uVBInsTstSubTestIndicator) wrt rip]
+ movzx rdx, byte [rbp + 10h] ; expected
+ lea rcx, [.szFmt wrt rip]
+ VBINSTST_CALL_FN_FAILURE_2
+
+ %elifdef ASM_CALL64_BS2
+ mov sBX, [VBINSTST_NAME(g_uVBInsTstSubTestIndicator) xWrtRIP]
+ mov sDX, [xBP + xCB + xCB] ; expected
+ lea sSI, [.szFmt xWrtRIP]
+ mov qword [xSP + xCB + 1*sCB], sBX
+ mov qword [xSP + xCB], sDX
+ mov [xSP], sSI
+ VBINSTST_CALL_FN_FAILURE_2
+
+ %else
+ mov sBX, [VBINSTST_NAME(g_uVBInsTstSubTestIndicator)]
+ mov sDX, [xBP + xCB + xCB] ; expected
+ mov [xSP + xCB + 1*sCB], sBX
+ mov [xSP + xCB], sDX
+ mov [xSP], RTCCPTR_PRE .szFmt
+ VBINSTST_CALL_FN_FAILURE_2
+ %endif
+
+ mov xSP, [xSP + 38h] ; restore the original (unaligned) stack pointer
+ MY_POP_ALL
+ ret 1*sCB ; pop the single parameter on behalf of the caller
+ %if ARCH_BITS == 64
+.szFmt: db 'Missing trap %RX8 (line %RU64)', 13, 0
+ %else
+.szFmt: db 'Missing trap %RX8 (line %RU32)', 13, 0
+ %endif
+VBINSTST_ENDPROC Common_MissingTrap
+
+ ;; Emits a tiny Common_MissingTrap_<xcpt> wrapper which pushes the given
+ ;; exception number and defers to Common_MissingTrap (which pops it again).
+ %macro Common_MissingTrapTemplate 1
+ VBINSTST_BEGINPROC Common_MissingTrap_%1
+ push %1
+ call VBINSTST_NAME(Common_MissingTrap)
+ ret
+ VBINSTST_ENDPROC Common_MissingTrap_%1
+ %endmacro
+ Common_MissingTrapTemplate X86_XCPT_DE
+ Common_MissingTrapTemplate X86_XCPT_DB
+ Common_MissingTrapTemplate X86_XCPT_NMI
+ Common_MissingTrapTemplate X86_XCPT_BP
+ Common_MissingTrapTemplate X86_XCPT_OF
+ Common_MissingTrapTemplate X86_XCPT_BR
+ Common_MissingTrapTemplate X86_XCPT_UD
+ Common_MissingTrapTemplate X86_XCPT_NM
+ ; Note! DF, CO_SEG_OVERRUN and MC are deliberately left disabled below -
+ ; presumably not sensibly testable this way; confirm.
+ ;Common_MissingTrapTemplate X86_XCPT_DF
+ ;Common_MissingTrapTemplate X86_XCPT_CO_SEG_OVERRUN
+ Common_MissingTrapTemplate X86_XCPT_TS
+ Common_MissingTrapTemplate X86_XCPT_NP
+ Common_MissingTrapTemplate X86_XCPT_SS
+ Common_MissingTrapTemplate X86_XCPT_GP
+ Common_MissingTrapTemplate X86_XCPT_PF
+ Common_MissingTrapTemplate X86_XCPT_MF
+ Common_MissingTrapTemplate X86_XCPT_AC
+ ;Common_MissingTrapTemplate X86_XCPT_MC
+ Common_MissingTrapTemplate X86_XCPT_XF
+
+%endif ; VBINSTST_CAN_DO_TRAPS
+
+
+;
+; Global data variables used by Common_SetupMemReadUxx.
+; For address calculation reasons, these must be qword aligned.
+; The odd-looking constants around each variable are filler keeping every
+; label 8-byte aligned (offsets 8, 24, 32 and 40 from the 'align 64');
+; presumably they also act as guard values for spotting over-wide accesses -
+; confirm.
+;
+VBINSTST_BEGINDATA
+ align 64
+ dd 09d8af498h, 09ab3e5f8h
+VBINSTST_GLOBALNAME_EX g_u64Data, data hidden
+ dq 0
+ dd 07d7af797h, 096b36562h
+VBINSTST_GLOBALNAME_EX g_u32Data, data hidden
+ dd 0
+ dd 012305987h
+VBINSTST_GLOBALNAME_EX g_u16Data, data hidden
+ dw 0
+ dw 05865h
+ dw 03863h
+ dw 02679h
+VBINSTST_GLOBALNAME_EX g_u8Data, data hidden
+ db 0
+ db 90h
+ dw 0865ah
+ dd 058daffe2h
+
+VBINSTST_BEGINCODE
+
+;;
+; Sets up g_u8Data.
+; @param uValue Pushed by the caller; popped again by the 'ret sCB' below.
+; NOTE(review): the store below is word-sized (ax), so it also overwrites the
+; 90h filler byte following the byte-sized g_u8Data - presumably harmless
+; padding, but confirm it is intentional (compare Common_SetupMemReadU16).
+VBINSTST_BEGINPROC Common_SetupMemReadU8
+ push sAX
+ mov ax, [xSP + sCB + xCB] ; fetch uValue (above the saved sAX and return address)
+ mov [VBINSTST_NAME(g_u8Data) xWrtRIP], ax
+ pop sAX
+ ret sCB
+VBINSTST_ENDPROC Common_SetupMemReadU8
+
+;;
+; Sets up g_u16Data.
+; @param uValue Pushed by the caller; popped again by the 'ret sCB' below.
+VBINSTST_BEGINPROC Common_SetupMemReadU16
+ push sAX
+ mov ax, [xSP + sCB + xCB] ; fetch uValue (above the saved sAX and return address)
+ mov [VBINSTST_NAME(g_u16Data) xWrtRIP], ax
+ pop sAX
+ ret sCB
+VBINSTST_ENDPROC Common_SetupMemReadU16
+
+;;
+; Sets up g_u32Data.
+; @param uValue Pushed by the caller; popped again by the 'ret sCB' below.
+VBINSTST_BEGINPROC Common_SetupMemReadU32
+ push sAX
+ mov eax, [xSP + sCB + xCB] ; fetch uValue (above the saved sAX and return address)
+ mov [VBINSTST_NAME(g_u32Data) xWrtRIP], eax
+ pop sAX
+ ret sCB
+VBINSTST_ENDPROC Common_SetupMemReadU32
+
+;;
+; Sets up g_u64Data.
+; @param uValue The 64-bit value to store.
+; NOTE(review): in the 32-bit path the value occupies two dwords on the stack
+; yet 'ret sCB' only pops one stack slot - presumably the generated caller
+; cleans up the remainder; confirm against InstructionTestGen.py.
+VBINSTST_BEGINPROC Common_SetupMemReadU64
+ push sAX
+%ifdef RT_ARCH_AMD64
+ mov rax, [xSP + sCB + xCB]
+ mov [VBINSTST_NAME(g_u64Data) xWrtRIP], rax
+%else
+ ; 32-bit: store the value as two dword halves, low part first.
+ mov eax, [xSP + sCB + xCB]
+ mov [VBINSTST_NAME(g_u64Data) xWrtRIP], eax
+ mov eax, [xSP + sCB + xCB + 4]
+ mov [VBINSTST_NAME(g_u64Data) + 4 xWrtRIP], eax
+%endif
+ pop sAX
+ ret sCB
+VBINSTST_ENDPROC Common_SetupMemReadU64
+
+
+%endif
+
diff --git a/src/VBox/VMM/testcase/Instructions/env-iprt-r3-32.mac b/src/VBox/VMM/testcase/Instructions/env-iprt-r3-32.mac
new file mode 100644
index 00000000..94afc032
--- /dev/null
+++ b/src/VBox/VMM/testcase/Instructions/env-iprt-r3-32.mac
@@ -0,0 +1,19 @@
+; $Id: env-iprt-r3-32.mac $
+;; @file
+; Instruction Test Environment - IPRT, Ring-3, 32-Bit.
+;
+
+;
+; Copyright (C) 2006-2020 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+%include "env-iprt-r3.mac"
+
diff --git a/src/VBox/VMM/testcase/Instructions/env-iprt-r3-64.mac b/src/VBox/VMM/testcase/Instructions/env-iprt-r3-64.mac
new file mode 100644
index 00000000..11f1b351
--- /dev/null
+++ b/src/VBox/VMM/testcase/Instructions/env-iprt-r3-64.mac
@@ -0,0 +1,19 @@
+; $Id: env-iprt-r3-64.mac $
+;; @file
+; Instruction Test Environment - IPRT, Ring-3, 64-Bit.
+;
+
+;
+; Copyright (C) 2006-2020 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+%include "env-iprt-r3.mac"
+
diff --git a/src/VBox/VMM/testcase/Instructions/env-iprt-r3.mac b/src/VBox/VMM/testcase/Instructions/env-iprt-r3.mac
new file mode 100644
index 00000000..80b42b93
--- /dev/null
+++ b/src/VBox/VMM/testcase/Instructions/env-iprt-r3.mac
@@ -0,0 +1,99 @@
+; $Id: env-iprt-r3.mac $
+;; @file
+; Instruction Test Environment - IPRT, Ring-3, 32-bit and 64-bit.
+;
+
+;
+; Copyright (C) 2006-2020 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+%ifndef ___env_iprt_r3_mac
+%define ___env_iprt_r3_mac
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "iprt/asmdefs.mac"
+
+
+;*******************************************************************************
+;* Defined Constants And Macros *
+;*******************************************************************************
+;; Register/size aliases used by env-common.mac; in this environment they map
+;; straight onto the native xXX registers / xCB size from asmdefs.mac.
+%define sAX xAX
+%define sBX xBX
+%define sCX xCX
+%define sDX xDX
+%define sSP xSP
+%define sBP xBP
+%define sSI xSI
+%define sDI xDI
+%define sCB xCB
+
+
+;; Same as BEGINPROC in asmdefs.mac.
+%macro VBINSTST_BEGINPROC 1
+BEGINPROC %1
+%endm
+
+;; Same as ENDPROC in asmdefs.mac.
+%macro VBINSTST_ENDPROC 1
+ENDPROC %1
+%endm
+
+;; Same as NAME in asmdefs.mac.
+%define VBINSTST_NAME(a_Name) NAME(a_Name)
+
+;; Same as GLOBALNAME_EX in asmdefs.mac.
+%define VBINSTST_GLOBALNAME_EX GLOBALNAME_EX
+
+;; Same as BEGINCODE in asmdefs.mac.
+%define VBINSTST_BEGINCODE BEGINCODE
+
+;; Same as BEGINDATA in asmdefs.mac.
+%define VBINSTST_BEGINDATA BEGINDATA
+
+
+;; Call RTTestISub like function.
+%define VBINSTST_CALL_FN_SUB_TEST call IMP2(RTTestISub)
+EXTERN_IMP2 RTTestISub
+
+;; Call RTTestIFailure like function with simple message.
+%define VBINSTST_CALL_FN_FAILURE call NAME(VBInsTstFailure)
+extern NAME(VBInsTstFailure)
+
+;; Call RTTestIFailure like function with format message + 1 arg.
+%define VBINSTST_CALL_FN_FAILURE_1 call NAME(VBInsTstFailure1)
+extern NAME(VBInsTstFailure1)
+
+;; Call RTTestIFailure like function with format message + 2 args.
+%define VBINSTST_CALL_FN_FAILURE_2 call NAME(VBInsTstFailure2)
+extern NAME(VBInsTstFailure2)
+
+;; Call RTTestIFailure like function with format message + 3 args.
+%define VBINSTST_CALL_FN_FAILURE_3 call NAME(VBInsTstFailure3)
+extern NAME(VBInsTstFailure3)
+
+;; Call RTTestIFailure like function with format message + 4 args.
+%define VBINSTST_CALL_FN_FAILURE_4 call NAME(VBInsTstFailure4)
+extern NAME(VBInsTstFailure4)
+
+
+;; Cannot do traps yet.  Leaving VBINSTST_CAN_DO_TRAPS undefined compiles out
+;; the Common_MissingTrap machinery in env-common.mac.
+%undef VBINSTST_CAN_DO_TRAPS
+
+
+;
+; Include the common bits (contains code using above macros)
+;
+%include "env-common.mac"
+
+%endif
+
diff --git a/src/VBox/VMM/testcase/Instructions/itgTableDaa.py b/src/VBox/VMM/testcase/Instructions/itgTableDaa.py
new file mode 100644
index 00000000..3aab0f10
--- /dev/null
+++ b/src/VBox/VMM/testcase/Instructions/itgTableDaa.py
@@ -0,0 +1,1105 @@
+# -*- coding: utf-8 -*-
+# $Id: itgTableDaa.py $
+
+"""
+DAA (instruction) result table.
+"""
+
+
+__copyright__ = \
+"""
+Copyright (C) 2012-2020 Oracle Corporation
+
+This file is part of VirtualBox Open Source Edition (OSE), as
+available from http://www.virtualbox.org. This file is free software;
+you can redistribute it and/or modify it under the terms of the GNU
+General Public License (GPL) as published by the Free Software
+Foundation, in version 2 as it comes in the "COPYING" file of the
+VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+"""
+__version__ = "$Revision: 135976 $";
+
+
+## The 32-bit GCC (C99) program that produced the table below.  It brute
+## forces DAA over every AL input value (0..255) combined with every AF/CF
+## input flag state, printing one (result-AL, result-EFLAGS & 0xd5) tuple per
+## case; the 0xd5 mask keeps only the CF, PF, AF, ZF and SF flag bits.
+g_sItgCProgramDaa = \
+"""
+#include <stdio.h>
+
+int main()
+{
+ for (unsigned uInputAL = 0; uInputAL < 256; uInputAL++)
+ for (unsigned fAux = 0; fAux < 2; fAux++)
+ for (unsigned fCarry = 0; fCarry < 2; fCarry++)
+ {
+ unsigned uInputEFlags = fCarry | (fAux << 4);
+ unsigned uResultAL;
+ unsigned uResultEFlags;
+ __asm__ __volatile__("pushl %1\\n"
+ "popfl\\n"
+ "daa\\n"
+ "pushf\\n"
+ "pop %1\\n"
+ : "=a" (uResultAL),
+ "=r" (uResultEFlags)
+ : "0" (uInputAL),
+ "1" (uInputEFlags)
+ : "memory"
+ );
+ printf(" ( 0x%02x, 0x%02x ), # AL=0x%02x, AF=%u CF=%u\\n",
+ uResultAL, uResultEFlags & 0xd5, uInputAL, fAux, fCarry);
+ /* 0xd5 = CF, PF, AF, ZF, SF */
+ }
+ return 0;
+}
+""";
+
+
+#
+# Compile and run the above program if requested to do so (pass 'gen' as the
+# first argument).  Needs a gcc capable of -m32; the regenerated table rows
+# are printed to stdout by the compiled helper.
+#
+if __name__ == '__main__':
+    import sys;
+    if len(sys.argv) > 1 and sys.argv[1] == 'gen':
+        import subprocess;
+        oProc = subprocess.Popen(['gcc', '-x', 'c', '-std=gnu99', '-m32', '-o', './itgTableDaa', '-'], stdin = subprocess.PIPE);
+        # NOTE(review): communicate() is handed a str; a PIPE under Python 3
+        # expects bytes, so this 'gen' mode presumably only works on Python 2
+        # (or needs .encode()) - confirm.
+        oProc.communicate(g_sItgCProgramDaa);
+        oProc.wait();
+        # NOTE(review): rebinds oProc to the integer exit status; neither this
+        # status nor gcc's is checked.
+        oProc = subprocess.Popen(['./itgTableDaa',]).wait();
+        sys.exit(0);
+
+
+
+##
+# The DAA results.
+#
+# The index / input relation is: index = (AL << 2) | (CF << 1) | AF
+#
+g_aItgDaaResults = \
+[
+ ( 0x00, 0x44 ), # AL=0x00, AF=0 CF=0
+ ( 0x60, 0x05 ), # AL=0x00, AF=0 CF=1
+ ( 0x06, 0x14 ), # AL=0x00, AF=1 CF=0
+ ( 0x66, 0x15 ), # AL=0x00, AF=1 CF=1
+ ( 0x01, 0x00 ), # AL=0x01, AF=0 CF=0
+ ( 0x61, 0x01 ), # AL=0x01, AF=0 CF=1
+ ( 0x07, 0x10 ), # AL=0x01, AF=1 CF=0
+ ( 0x67, 0x11 ), # AL=0x01, AF=1 CF=1
+ ( 0x02, 0x00 ), # AL=0x02, AF=0 CF=0
+ ( 0x62, 0x01 ), # AL=0x02, AF=0 CF=1
+ ( 0x08, 0x10 ), # AL=0x02, AF=1 CF=0
+ ( 0x68, 0x11 ), # AL=0x02, AF=1 CF=1
+ ( 0x03, 0x04 ), # AL=0x03, AF=0 CF=0
+ ( 0x63, 0x05 ), # AL=0x03, AF=0 CF=1
+ ( 0x09, 0x14 ), # AL=0x03, AF=1 CF=0
+ ( 0x69, 0x15 ), # AL=0x03, AF=1 CF=1
+ ( 0x04, 0x00 ), # AL=0x04, AF=0 CF=0
+ ( 0x64, 0x01 ), # AL=0x04, AF=0 CF=1
+ ( 0x0a, 0x14 ), # AL=0x04, AF=1 CF=0
+ ( 0x6a, 0x15 ), # AL=0x04, AF=1 CF=1
+ ( 0x05, 0x04 ), # AL=0x05, AF=0 CF=0
+ ( 0x65, 0x05 ), # AL=0x05, AF=0 CF=1
+ ( 0x0b, 0x10 ), # AL=0x05, AF=1 CF=0
+ ( 0x6b, 0x11 ), # AL=0x05, AF=1 CF=1
+ ( 0x06, 0x04 ), # AL=0x06, AF=0 CF=0
+ ( 0x66, 0x05 ), # AL=0x06, AF=0 CF=1
+ ( 0x0c, 0x14 ), # AL=0x06, AF=1 CF=0
+ ( 0x6c, 0x15 ), # AL=0x06, AF=1 CF=1
+ ( 0x07, 0x00 ), # AL=0x07, AF=0 CF=0
+ ( 0x67, 0x01 ), # AL=0x07, AF=0 CF=1
+ ( 0x0d, 0x10 ), # AL=0x07, AF=1 CF=0
+ ( 0x6d, 0x11 ), # AL=0x07, AF=1 CF=1
+ ( 0x08, 0x00 ), # AL=0x08, AF=0 CF=0
+ ( 0x68, 0x01 ), # AL=0x08, AF=0 CF=1
+ ( 0x0e, 0x10 ), # AL=0x08, AF=1 CF=0
+ ( 0x6e, 0x11 ), # AL=0x08, AF=1 CF=1
+ ( 0x09, 0x04 ), # AL=0x09, AF=0 CF=0
+ ( 0x69, 0x05 ), # AL=0x09, AF=0 CF=1
+ ( 0x0f, 0x14 ), # AL=0x09, AF=1 CF=0
+ ( 0x6f, 0x15 ), # AL=0x09, AF=1 CF=1
+ ( 0x10, 0x10 ), # AL=0x0a, AF=0 CF=0
+ ( 0x70, 0x11 ), # AL=0x0a, AF=0 CF=1
+ ( 0x10, 0x10 ), # AL=0x0a, AF=1 CF=0
+ ( 0x70, 0x11 ), # AL=0x0a, AF=1 CF=1
+ ( 0x11, 0x14 ), # AL=0x0b, AF=0 CF=0
+ ( 0x71, 0x15 ), # AL=0x0b, AF=0 CF=1
+ ( 0x11, 0x14 ), # AL=0x0b, AF=1 CF=0
+ ( 0x71, 0x15 ), # AL=0x0b, AF=1 CF=1
+ ( 0x12, 0x14 ), # AL=0x0c, AF=0 CF=0
+ ( 0x72, 0x15 ), # AL=0x0c, AF=0 CF=1
+ ( 0x12, 0x14 ), # AL=0x0c, AF=1 CF=0
+ ( 0x72, 0x15 ), # AL=0x0c, AF=1 CF=1
+ ( 0x13, 0x10 ), # AL=0x0d, AF=0 CF=0
+ ( 0x73, 0x11 ), # AL=0x0d, AF=0 CF=1
+ ( 0x13, 0x10 ), # AL=0x0d, AF=1 CF=0
+ ( 0x73, 0x11 ), # AL=0x0d, AF=1 CF=1
+ ( 0x14, 0x14 ), # AL=0x0e, AF=0 CF=0
+ ( 0x74, 0x15 ), # AL=0x0e, AF=0 CF=1
+ ( 0x14, 0x14 ), # AL=0x0e, AF=1 CF=0
+ ( 0x74, 0x15 ), # AL=0x0e, AF=1 CF=1
+ ( 0x15, 0x10 ), # AL=0x0f, AF=0 CF=0
+ ( 0x75, 0x11 ), # AL=0x0f, AF=0 CF=1
+ ( 0x15, 0x10 ), # AL=0x0f, AF=1 CF=0
+ ( 0x75, 0x11 ), # AL=0x0f, AF=1 CF=1
+ ( 0x10, 0x00 ), # AL=0x10, AF=0 CF=0
+ ( 0x70, 0x01 ), # AL=0x10, AF=0 CF=1
+ ( 0x16, 0x10 ), # AL=0x10, AF=1 CF=0
+ ( 0x76, 0x11 ), # AL=0x10, AF=1 CF=1
+ ( 0x11, 0x04 ), # AL=0x11, AF=0 CF=0
+ ( 0x71, 0x05 ), # AL=0x11, AF=0 CF=1
+ ( 0x17, 0x14 ), # AL=0x11, AF=1 CF=0
+ ( 0x77, 0x15 ), # AL=0x11, AF=1 CF=1
+ ( 0x12, 0x04 ), # AL=0x12, AF=0 CF=0
+ ( 0x72, 0x05 ), # AL=0x12, AF=0 CF=1
+ ( 0x18, 0x14 ), # AL=0x12, AF=1 CF=0
+ ( 0x78, 0x15 ), # AL=0x12, AF=1 CF=1
+ ( 0x13, 0x00 ), # AL=0x13, AF=0 CF=0
+ ( 0x73, 0x01 ), # AL=0x13, AF=0 CF=1
+ ( 0x19, 0x10 ), # AL=0x13, AF=1 CF=0
+ ( 0x79, 0x11 ), # AL=0x13, AF=1 CF=1
+ ( 0x14, 0x04 ), # AL=0x14, AF=0 CF=0
+ ( 0x74, 0x05 ), # AL=0x14, AF=0 CF=1
+ ( 0x1a, 0x10 ), # AL=0x14, AF=1 CF=0
+ ( 0x7a, 0x11 ), # AL=0x14, AF=1 CF=1
+ ( 0x15, 0x00 ), # AL=0x15, AF=0 CF=0
+ ( 0x75, 0x01 ), # AL=0x15, AF=0 CF=1
+ ( 0x1b, 0x14 ), # AL=0x15, AF=1 CF=0
+ ( 0x7b, 0x15 ), # AL=0x15, AF=1 CF=1
+ ( 0x16, 0x00 ), # AL=0x16, AF=0 CF=0
+ ( 0x76, 0x01 ), # AL=0x16, AF=0 CF=1
+ ( 0x1c, 0x10 ), # AL=0x16, AF=1 CF=0
+ ( 0x7c, 0x11 ), # AL=0x16, AF=1 CF=1
+ ( 0x17, 0x04 ), # AL=0x17, AF=0 CF=0
+ ( 0x77, 0x05 ), # AL=0x17, AF=0 CF=1
+ ( 0x1d, 0x14 ), # AL=0x17, AF=1 CF=0
+ ( 0x7d, 0x15 ), # AL=0x17, AF=1 CF=1
+ ( 0x18, 0x04 ), # AL=0x18, AF=0 CF=0
+ ( 0x78, 0x05 ), # AL=0x18, AF=0 CF=1
+ ( 0x1e, 0x14 ), # AL=0x18, AF=1 CF=0
+ ( 0x7e, 0x15 ), # AL=0x18, AF=1 CF=1
+ ( 0x19, 0x00 ), # AL=0x19, AF=0 CF=0
+ ( 0x79, 0x01 ), # AL=0x19, AF=0 CF=1
+ ( 0x1f, 0x10 ), # AL=0x19, AF=1 CF=0
+ ( 0x7f, 0x11 ), # AL=0x19, AF=1 CF=1
+ ( 0x20, 0x10 ), # AL=0x1a, AF=0 CF=0
+ ( 0x80, 0x91 ), # AL=0x1a, AF=0 CF=1
+ ( 0x20, 0x10 ), # AL=0x1a, AF=1 CF=0
+ ( 0x80, 0x91 ), # AL=0x1a, AF=1 CF=1
+ ( 0x21, 0x14 ), # AL=0x1b, AF=0 CF=0
+ ( 0x81, 0x95 ), # AL=0x1b, AF=0 CF=1
+ ( 0x21, 0x14 ), # AL=0x1b, AF=1 CF=0
+ ( 0x81, 0x95 ), # AL=0x1b, AF=1 CF=1
+ ( 0x22, 0x14 ), # AL=0x1c, AF=0 CF=0
+ ( 0x82, 0x95 ), # AL=0x1c, AF=0 CF=1
+ ( 0x22, 0x14 ), # AL=0x1c, AF=1 CF=0
+ ( 0x82, 0x95 ), # AL=0x1c, AF=1 CF=1
+ ( 0x23, 0x10 ), # AL=0x1d, AF=0 CF=0
+ ( 0x83, 0x91 ), # AL=0x1d, AF=0 CF=1
+ ( 0x23, 0x10 ), # AL=0x1d, AF=1 CF=0
+ ( 0x83, 0x91 ), # AL=0x1d, AF=1 CF=1
+ ( 0x24, 0x14 ), # AL=0x1e, AF=0 CF=0
+ ( 0x84, 0x95 ), # AL=0x1e, AF=0 CF=1
+ ( 0x24, 0x14 ), # AL=0x1e, AF=1 CF=0
+ ( 0x84, 0x95 ), # AL=0x1e, AF=1 CF=1
+ ( 0x25, 0x10 ), # AL=0x1f, AF=0 CF=0
+ ( 0x85, 0x91 ), # AL=0x1f, AF=0 CF=1
+ ( 0x25, 0x10 ), # AL=0x1f, AF=1 CF=0
+ ( 0x85, 0x91 ), # AL=0x1f, AF=1 CF=1
+ ( 0x20, 0x00 ), # AL=0x20, AF=0 CF=0
+ ( 0x80, 0x81 ), # AL=0x20, AF=0 CF=1
+ ( 0x26, 0x10 ), # AL=0x20, AF=1 CF=0
+ ( 0x86, 0x91 ), # AL=0x20, AF=1 CF=1
+ ( 0x21, 0x04 ), # AL=0x21, AF=0 CF=0
+ ( 0x81, 0x85 ), # AL=0x21, AF=0 CF=1
+ ( 0x27, 0x14 ), # AL=0x21, AF=1 CF=0
+ ( 0x87, 0x95 ), # AL=0x21, AF=1 CF=1
+ ( 0x22, 0x04 ), # AL=0x22, AF=0 CF=0
+ ( 0x82, 0x85 ), # AL=0x22, AF=0 CF=1
+ ( 0x28, 0x14 ), # AL=0x22, AF=1 CF=0
+ ( 0x88, 0x95 ), # AL=0x22, AF=1 CF=1
+ ( 0x23, 0x00 ), # AL=0x23, AF=0 CF=0
+ ( 0x83, 0x81 ), # AL=0x23, AF=0 CF=1
+ ( 0x29, 0x10 ), # AL=0x23, AF=1 CF=0
+ ( 0x89, 0x91 ), # AL=0x23, AF=1 CF=1
+ ( 0x24, 0x04 ), # AL=0x24, AF=0 CF=0
+ ( 0x84, 0x85 ), # AL=0x24, AF=0 CF=1
+ ( 0x2a, 0x10 ), # AL=0x24, AF=1 CF=0
+ ( 0x8a, 0x91 ), # AL=0x24, AF=1 CF=1
+ ( 0x25, 0x00 ), # AL=0x25, AF=0 CF=0
+ ( 0x85, 0x81 ), # AL=0x25, AF=0 CF=1
+ ( 0x2b, 0x14 ), # AL=0x25, AF=1 CF=0
+ ( 0x8b, 0x95 ), # AL=0x25, AF=1 CF=1
+ ( 0x26, 0x00 ), # AL=0x26, AF=0 CF=0
+ ( 0x86, 0x81 ), # AL=0x26, AF=0 CF=1
+ ( 0x2c, 0x10 ), # AL=0x26, AF=1 CF=0
+ ( 0x8c, 0x91 ), # AL=0x26, AF=1 CF=1
+ ( 0x27, 0x04 ), # AL=0x27, AF=0 CF=0
+ ( 0x87, 0x85 ), # AL=0x27, AF=0 CF=1
+ ( 0x2d, 0x14 ), # AL=0x27, AF=1 CF=0
+ ( 0x8d, 0x95 ), # AL=0x27, AF=1 CF=1
+ ( 0x28, 0x04 ), # AL=0x28, AF=0 CF=0
+ ( 0x88, 0x85 ), # AL=0x28, AF=0 CF=1
+ ( 0x2e, 0x14 ), # AL=0x28, AF=1 CF=0
+ ( 0x8e, 0x95 ), # AL=0x28, AF=1 CF=1
+ ( 0x29, 0x00 ), # AL=0x29, AF=0 CF=0
+ ( 0x89, 0x81 ), # AL=0x29, AF=0 CF=1
+ ( 0x2f, 0x10 ), # AL=0x29, AF=1 CF=0
+ ( 0x8f, 0x91 ), # AL=0x29, AF=1 CF=1
+ ( 0x30, 0x14 ), # AL=0x2a, AF=0 CF=0
+ ( 0x90, 0x95 ), # AL=0x2a, AF=0 CF=1
+ ( 0x30, 0x14 ), # AL=0x2a, AF=1 CF=0
+ ( 0x90, 0x95 ), # AL=0x2a, AF=1 CF=1
+ ( 0x31, 0x10 ), # AL=0x2b, AF=0 CF=0
+ ( 0x91, 0x91 ), # AL=0x2b, AF=0 CF=1
+ ( 0x31, 0x10 ), # AL=0x2b, AF=1 CF=0
+ ( 0x91, 0x91 ), # AL=0x2b, AF=1 CF=1
+ ( 0x32, 0x10 ), # AL=0x2c, AF=0 CF=0
+ ( 0x92, 0x91 ), # AL=0x2c, AF=0 CF=1
+ ( 0x32, 0x10 ), # AL=0x2c, AF=1 CF=0
+ ( 0x92, 0x91 ), # AL=0x2c, AF=1 CF=1
+ ( 0x33, 0x14 ), # AL=0x2d, AF=0 CF=0
+ ( 0x93, 0x95 ), # AL=0x2d, AF=0 CF=1
+ ( 0x33, 0x14 ), # AL=0x2d, AF=1 CF=0
+ ( 0x93, 0x95 ), # AL=0x2d, AF=1 CF=1
+ ( 0x34, 0x10 ), # AL=0x2e, AF=0 CF=0
+ ( 0x94, 0x91 ), # AL=0x2e, AF=0 CF=1
+ ( 0x34, 0x10 ), # AL=0x2e, AF=1 CF=0
+ ( 0x94, 0x91 ), # AL=0x2e, AF=1 CF=1
+ ( 0x35, 0x14 ), # AL=0x2f, AF=0 CF=0
+ ( 0x95, 0x95 ), # AL=0x2f, AF=0 CF=1
+ ( 0x35, 0x14 ), # AL=0x2f, AF=1 CF=0
+ ( 0x95, 0x95 ), # AL=0x2f, AF=1 CF=1
+ ( 0x30, 0x04 ), # AL=0x30, AF=0 CF=0
+ ( 0x90, 0x85 ), # AL=0x30, AF=0 CF=1
+ ( 0x36, 0x14 ), # AL=0x30, AF=1 CF=0
+ ( 0x96, 0x95 ), # AL=0x30, AF=1 CF=1
+ ( 0x31, 0x00 ), # AL=0x31, AF=0 CF=0
+ ( 0x91, 0x81 ), # AL=0x31, AF=0 CF=1
+ ( 0x37, 0x10 ), # AL=0x31, AF=1 CF=0
+ ( 0x97, 0x91 ), # AL=0x31, AF=1 CF=1
+ ( 0x32, 0x00 ), # AL=0x32, AF=0 CF=0
+ ( 0x92, 0x81 ), # AL=0x32, AF=0 CF=1
+ ( 0x38, 0x10 ), # AL=0x32, AF=1 CF=0
+ ( 0x98, 0x91 ), # AL=0x32, AF=1 CF=1
+ ( 0x33, 0x04 ), # AL=0x33, AF=0 CF=0
+ ( 0x93, 0x85 ), # AL=0x33, AF=0 CF=1
+ ( 0x39, 0x14 ), # AL=0x33, AF=1 CF=0
+ ( 0x99, 0x95 ), # AL=0x33, AF=1 CF=1
+ ( 0x34, 0x00 ), # AL=0x34, AF=0 CF=0
+ ( 0x94, 0x81 ), # AL=0x34, AF=0 CF=1
+ ( 0x3a, 0x14 ), # AL=0x34, AF=1 CF=0
+ ( 0x9a, 0x95 ), # AL=0x34, AF=1 CF=1
+ ( 0x35, 0x04 ), # AL=0x35, AF=0 CF=0
+ ( 0x95, 0x85 ), # AL=0x35, AF=0 CF=1
+ ( 0x3b, 0x10 ), # AL=0x35, AF=1 CF=0
+ ( 0x9b, 0x91 ), # AL=0x35, AF=1 CF=1
+ ( 0x36, 0x04 ), # AL=0x36, AF=0 CF=0
+ ( 0x96, 0x85 ), # AL=0x36, AF=0 CF=1
+ ( 0x3c, 0x14 ), # AL=0x36, AF=1 CF=0
+ ( 0x9c, 0x95 ), # AL=0x36, AF=1 CF=1
+ ( 0x37, 0x00 ), # AL=0x37, AF=0 CF=0
+ ( 0x97, 0x81 ), # AL=0x37, AF=0 CF=1
+ ( 0x3d, 0x10 ), # AL=0x37, AF=1 CF=0
+ ( 0x9d, 0x91 ), # AL=0x37, AF=1 CF=1
+ ( 0x38, 0x00 ), # AL=0x38, AF=0 CF=0
+ ( 0x98, 0x81 ), # AL=0x38, AF=0 CF=1
+ ( 0x3e, 0x10 ), # AL=0x38, AF=1 CF=0
+ ( 0x9e, 0x91 ), # AL=0x38, AF=1 CF=1
+ ( 0x39, 0x04 ), # AL=0x39, AF=0 CF=0
+ ( 0x99, 0x85 ), # AL=0x39, AF=0 CF=1
+ ( 0x3f, 0x14 ), # AL=0x39, AF=1 CF=0
+ ( 0x9f, 0x95 ), # AL=0x39, AF=1 CF=1
+ ( 0x40, 0x10 ), # AL=0x3a, AF=0 CF=0
+ ( 0xa0, 0x95 ), # AL=0x3a, AF=0 CF=1
+ ( 0x40, 0x10 ), # AL=0x3a, AF=1 CF=0
+ ( 0xa0, 0x95 ), # AL=0x3a, AF=1 CF=1
+ ( 0x41, 0x14 ), # AL=0x3b, AF=0 CF=0
+ ( 0xa1, 0x91 ), # AL=0x3b, AF=0 CF=1
+ ( 0x41, 0x14 ), # AL=0x3b, AF=1 CF=0
+ ( 0xa1, 0x91 ), # AL=0x3b, AF=1 CF=1
+ ( 0x42, 0x14 ), # AL=0x3c, AF=0 CF=0
+ ( 0xa2, 0x91 ), # AL=0x3c, AF=0 CF=1
+ ( 0x42, 0x14 ), # AL=0x3c, AF=1 CF=0
+ ( 0xa2, 0x91 ), # AL=0x3c, AF=1 CF=1
+ ( 0x43, 0x10 ), # AL=0x3d, AF=0 CF=0
+ ( 0xa3, 0x95 ), # AL=0x3d, AF=0 CF=1
+ ( 0x43, 0x10 ), # AL=0x3d, AF=1 CF=0
+ ( 0xa3, 0x95 ), # AL=0x3d, AF=1 CF=1
+ ( 0x44, 0x14 ), # AL=0x3e, AF=0 CF=0
+ ( 0xa4, 0x91 ), # AL=0x3e, AF=0 CF=1
+ ( 0x44, 0x14 ), # AL=0x3e, AF=1 CF=0
+ ( 0xa4, 0x91 ), # AL=0x3e, AF=1 CF=1
+ ( 0x45, 0x10 ), # AL=0x3f, AF=0 CF=0
+ ( 0xa5, 0x95 ), # AL=0x3f, AF=0 CF=1
+ ( 0x45, 0x10 ), # AL=0x3f, AF=1 CF=0
+ ( 0xa5, 0x95 ), # AL=0x3f, AF=1 CF=1
+ ( 0x40, 0x00 ), # AL=0x40, AF=0 CF=0
+ ( 0xa0, 0x85 ), # AL=0x40, AF=0 CF=1
+ ( 0x46, 0x10 ), # AL=0x40, AF=1 CF=0
+ ( 0xa6, 0x95 ), # AL=0x40, AF=1 CF=1
+ ( 0x41, 0x04 ), # AL=0x41, AF=0 CF=0
+ ( 0xa1, 0x81 ), # AL=0x41, AF=0 CF=1
+ ( 0x47, 0x14 ), # AL=0x41, AF=1 CF=0
+ ( 0xa7, 0x91 ), # AL=0x41, AF=1 CF=1
+ ( 0x42, 0x04 ), # AL=0x42, AF=0 CF=0
+ ( 0xa2, 0x81 ), # AL=0x42, AF=0 CF=1
+ ( 0x48, 0x14 ), # AL=0x42, AF=1 CF=0
+ ( 0xa8, 0x91 ), # AL=0x42, AF=1 CF=1
+ ( 0x43, 0x00 ), # AL=0x43, AF=0 CF=0
+ ( 0xa3, 0x85 ), # AL=0x43, AF=0 CF=1
+ ( 0x49, 0x10 ), # AL=0x43, AF=1 CF=0
+ ( 0xa9, 0x95 ), # AL=0x43, AF=1 CF=1
+ ( 0x44, 0x04 ), # AL=0x44, AF=0 CF=0
+ ( 0xa4, 0x81 ), # AL=0x44, AF=0 CF=1
+ ( 0x4a, 0x10 ), # AL=0x44, AF=1 CF=0
+ ( 0xaa, 0x95 ), # AL=0x44, AF=1 CF=1
+ ( 0x45, 0x00 ), # AL=0x45, AF=0 CF=0
+ ( 0xa5, 0x85 ), # AL=0x45, AF=0 CF=1
+ ( 0x4b, 0x14 ), # AL=0x45, AF=1 CF=0
+ ( 0xab, 0x91 ), # AL=0x45, AF=1 CF=1
+ ( 0x46, 0x00 ), # AL=0x46, AF=0 CF=0
+ ( 0xa6, 0x85 ), # AL=0x46, AF=0 CF=1
+ ( 0x4c, 0x10 ), # AL=0x46, AF=1 CF=0
+ ( 0xac, 0x95 ), # AL=0x46, AF=1 CF=1
+ ( 0x47, 0x04 ), # AL=0x47, AF=0 CF=0
+ ( 0xa7, 0x81 ), # AL=0x47, AF=0 CF=1
+ ( 0x4d, 0x14 ), # AL=0x47, AF=1 CF=0
+ ( 0xad, 0x91 ), # AL=0x47, AF=1 CF=1
+ ( 0x48, 0x04 ), # AL=0x48, AF=0 CF=0
+ ( 0xa8, 0x81 ), # AL=0x48, AF=0 CF=1
+ ( 0x4e, 0x14 ), # AL=0x48, AF=1 CF=0
+ ( 0xae, 0x91 ), # AL=0x48, AF=1 CF=1
+ ( 0x49, 0x00 ), # AL=0x49, AF=0 CF=0
+ ( 0xa9, 0x85 ), # AL=0x49, AF=0 CF=1
+ ( 0x4f, 0x10 ), # AL=0x49, AF=1 CF=0
+ ( 0xaf, 0x95 ), # AL=0x49, AF=1 CF=1
+ ( 0x50, 0x14 ), # AL=0x4a, AF=0 CF=0
+ ( 0xb0, 0x91 ), # AL=0x4a, AF=0 CF=1
+ ( 0x50, 0x14 ), # AL=0x4a, AF=1 CF=0
+ ( 0xb0, 0x91 ), # AL=0x4a, AF=1 CF=1
+ ( 0x51, 0x10 ), # AL=0x4b, AF=0 CF=0
+ ( 0xb1, 0x95 ), # AL=0x4b, AF=0 CF=1
+ ( 0x51, 0x10 ), # AL=0x4b, AF=1 CF=0
+ ( 0xb1, 0x95 ), # AL=0x4b, AF=1 CF=1
+ ( 0x52, 0x10 ), # AL=0x4c, AF=0 CF=0
+ ( 0xb2, 0x95 ), # AL=0x4c, AF=0 CF=1
+ ( 0x52, 0x10 ), # AL=0x4c, AF=1 CF=0
+ ( 0xb2, 0x95 ), # AL=0x4c, AF=1 CF=1
+ ( 0x53, 0x14 ), # AL=0x4d, AF=0 CF=0
+ ( 0xb3, 0x91 ), # AL=0x4d, AF=0 CF=1
+ ( 0x53, 0x14 ), # AL=0x4d, AF=1 CF=0
+ ( 0xb3, 0x91 ), # AL=0x4d, AF=1 CF=1
+ ( 0x54, 0x10 ), # AL=0x4e, AF=0 CF=0
+ ( 0xb4, 0x95 ), # AL=0x4e, AF=0 CF=1
+ ( 0x54, 0x10 ), # AL=0x4e, AF=1 CF=0
+ ( 0xb4, 0x95 ), # AL=0x4e, AF=1 CF=1
+ ( 0x55, 0x14 ), # AL=0x4f, AF=0 CF=0
+ ( 0xb5, 0x91 ), # AL=0x4f, AF=0 CF=1
+ ( 0x55, 0x14 ), # AL=0x4f, AF=1 CF=0
+ ( 0xb5, 0x91 ), # AL=0x4f, AF=1 CF=1
+ ( 0x50, 0x04 ), # AL=0x50, AF=0 CF=0
+ ( 0xb0, 0x81 ), # AL=0x50, AF=0 CF=1
+ ( 0x56, 0x14 ), # AL=0x50, AF=1 CF=0
+ ( 0xb6, 0x91 ), # AL=0x50, AF=1 CF=1
+ ( 0x51, 0x00 ), # AL=0x51, AF=0 CF=0
+ ( 0xb1, 0x85 ), # AL=0x51, AF=0 CF=1
+ ( 0x57, 0x10 ), # AL=0x51, AF=1 CF=0
+ ( 0xb7, 0x95 ), # AL=0x51, AF=1 CF=1
+ ( 0x52, 0x00 ), # AL=0x52, AF=0 CF=0
+ ( 0xb2, 0x85 ), # AL=0x52, AF=0 CF=1
+ ( 0x58, 0x10 ), # AL=0x52, AF=1 CF=0
+ ( 0xb8, 0x95 ), # AL=0x52, AF=1 CF=1
+ ( 0x53, 0x04 ), # AL=0x53, AF=0 CF=0
+ ( 0xb3, 0x81 ), # AL=0x53, AF=0 CF=1
+ ( 0x59, 0x14 ), # AL=0x53, AF=1 CF=0
+ ( 0xb9, 0x91 ), # AL=0x53, AF=1 CF=1
+ ( 0x54, 0x00 ), # AL=0x54, AF=0 CF=0
+ ( 0xb4, 0x85 ), # AL=0x54, AF=0 CF=1
+ ( 0x5a, 0x14 ), # AL=0x54, AF=1 CF=0
+ ( 0xba, 0x91 ), # AL=0x54, AF=1 CF=1
+ ( 0x55, 0x04 ), # AL=0x55, AF=0 CF=0
+ ( 0xb5, 0x81 ), # AL=0x55, AF=0 CF=1
+ ( 0x5b, 0x10 ), # AL=0x55, AF=1 CF=0
+ ( 0xbb, 0x95 ), # AL=0x55, AF=1 CF=1
+ ( 0x56, 0x04 ), # AL=0x56, AF=0 CF=0
+ ( 0xb6, 0x81 ), # AL=0x56, AF=0 CF=1
+ ( 0x5c, 0x14 ), # AL=0x56, AF=1 CF=0
+ ( 0xbc, 0x91 ), # AL=0x56, AF=1 CF=1
+ ( 0x57, 0x00 ), # AL=0x57, AF=0 CF=0
+ ( 0xb7, 0x85 ), # AL=0x57, AF=0 CF=1
+ ( 0x5d, 0x10 ), # AL=0x57, AF=1 CF=0
+ ( 0xbd, 0x95 ), # AL=0x57, AF=1 CF=1
+ ( 0x58, 0x00 ), # AL=0x58, AF=0 CF=0
+ ( 0xb8, 0x85 ), # AL=0x58, AF=0 CF=1
+ ( 0x5e, 0x10 ), # AL=0x58, AF=1 CF=0
+ ( 0xbe, 0x95 ), # AL=0x58, AF=1 CF=1
+ ( 0x59, 0x04 ), # AL=0x59, AF=0 CF=0
+ ( 0xb9, 0x81 ), # AL=0x59, AF=0 CF=1
+ ( 0x5f, 0x14 ), # AL=0x59, AF=1 CF=0
+ ( 0xbf, 0x91 ), # AL=0x59, AF=1 CF=1
+ ( 0x60, 0x14 ), # AL=0x5a, AF=0 CF=0
+ ( 0xc0, 0x95 ), # AL=0x5a, AF=0 CF=1
+ ( 0x60, 0x14 ), # AL=0x5a, AF=1 CF=0
+ ( 0xc0, 0x95 ), # AL=0x5a, AF=1 CF=1
+ ( 0x61, 0x10 ), # AL=0x5b, AF=0 CF=0
+ ( 0xc1, 0x91 ), # AL=0x5b, AF=0 CF=1
+ ( 0x61, 0x10 ), # AL=0x5b, AF=1 CF=0
+ ( 0xc1, 0x91 ), # AL=0x5b, AF=1 CF=1
+ ( 0x62, 0x10 ), # AL=0x5c, AF=0 CF=0
+ ( 0xc2, 0x91 ), # AL=0x5c, AF=0 CF=1
+ ( 0x62, 0x10 ), # AL=0x5c, AF=1 CF=0
+ ( 0xc2, 0x91 ), # AL=0x5c, AF=1 CF=1
+ ( 0x63, 0x14 ), # AL=0x5d, AF=0 CF=0
+ ( 0xc3, 0x95 ), # AL=0x5d, AF=0 CF=1
+ ( 0x63, 0x14 ), # AL=0x5d, AF=1 CF=0
+ ( 0xc3, 0x95 ), # AL=0x5d, AF=1 CF=1
+ ( 0x64, 0x10 ), # AL=0x5e, AF=0 CF=0
+ ( 0xc4, 0x91 ), # AL=0x5e, AF=0 CF=1
+ ( 0x64, 0x10 ), # AL=0x5e, AF=1 CF=0
+ ( 0xc4, 0x91 ), # AL=0x5e, AF=1 CF=1
+ ( 0x65, 0x14 ), # AL=0x5f, AF=0 CF=0
+ ( 0xc5, 0x95 ), # AL=0x5f, AF=0 CF=1
+ ( 0x65, 0x14 ), # AL=0x5f, AF=1 CF=0
+ ( 0xc5, 0x95 ), # AL=0x5f, AF=1 CF=1
+ ( 0x60, 0x04 ), # AL=0x60, AF=0 CF=0
+ ( 0xc0, 0x85 ), # AL=0x60, AF=0 CF=1
+ ( 0x66, 0x14 ), # AL=0x60, AF=1 CF=0
+ ( 0xc6, 0x95 ), # AL=0x60, AF=1 CF=1
+ ( 0x61, 0x00 ), # AL=0x61, AF=0 CF=0
+ ( 0xc1, 0x81 ), # AL=0x61, AF=0 CF=1
+ ( 0x67, 0x10 ), # AL=0x61, AF=1 CF=0
+ ( 0xc7, 0x91 ), # AL=0x61, AF=1 CF=1
+ ( 0x62, 0x00 ), # AL=0x62, AF=0 CF=0
+ ( 0xc2, 0x81 ), # AL=0x62, AF=0 CF=1
+ ( 0x68, 0x10 ), # AL=0x62, AF=1 CF=0
+ ( 0xc8, 0x91 ), # AL=0x62, AF=1 CF=1
+ ( 0x63, 0x04 ), # AL=0x63, AF=0 CF=0
+ ( 0xc3, 0x85 ), # AL=0x63, AF=0 CF=1
+ ( 0x69, 0x14 ), # AL=0x63, AF=1 CF=0
+ ( 0xc9, 0x95 ), # AL=0x63, AF=1 CF=1
+ ( 0x64, 0x00 ), # AL=0x64, AF=0 CF=0
+ ( 0xc4, 0x81 ), # AL=0x64, AF=0 CF=1
+ ( 0x6a, 0x14 ), # AL=0x64, AF=1 CF=0
+ ( 0xca, 0x95 ), # AL=0x64, AF=1 CF=1
+ ( 0x65, 0x04 ), # AL=0x65, AF=0 CF=0
+ ( 0xc5, 0x85 ), # AL=0x65, AF=0 CF=1
+ ( 0x6b, 0x10 ), # AL=0x65, AF=1 CF=0
+ ( 0xcb, 0x91 ), # AL=0x65, AF=1 CF=1
+ ( 0x66, 0x04 ), # AL=0x66, AF=0 CF=0
+ ( 0xc6, 0x85 ), # AL=0x66, AF=0 CF=1
+ ( 0x6c, 0x14 ), # AL=0x66, AF=1 CF=0
+ ( 0xcc, 0x95 ), # AL=0x66, AF=1 CF=1
+ ( 0x67, 0x00 ), # AL=0x67, AF=0 CF=0
+ ( 0xc7, 0x81 ), # AL=0x67, AF=0 CF=1
+ ( 0x6d, 0x10 ), # AL=0x67, AF=1 CF=0
+ ( 0xcd, 0x91 ), # AL=0x67, AF=1 CF=1
+ ( 0x68, 0x00 ), # AL=0x68, AF=0 CF=0
+ ( 0xc8, 0x81 ), # AL=0x68, AF=0 CF=1
+ ( 0x6e, 0x10 ), # AL=0x68, AF=1 CF=0
+ ( 0xce, 0x91 ), # AL=0x68, AF=1 CF=1
+ ( 0x69, 0x04 ), # AL=0x69, AF=0 CF=0
+ ( 0xc9, 0x85 ), # AL=0x69, AF=0 CF=1
+ ( 0x6f, 0x14 ), # AL=0x69, AF=1 CF=0
+ ( 0xcf, 0x95 ), # AL=0x69, AF=1 CF=1
+ ( 0x70, 0x10 ), # AL=0x6a, AF=0 CF=0
+ ( 0xd0, 0x91 ), # AL=0x6a, AF=0 CF=1
+ ( 0x70, 0x10 ), # AL=0x6a, AF=1 CF=0
+ ( 0xd0, 0x91 ), # AL=0x6a, AF=1 CF=1
+ ( 0x71, 0x14 ), # AL=0x6b, AF=0 CF=0
+ ( 0xd1, 0x95 ), # AL=0x6b, AF=0 CF=1
+ ( 0x71, 0x14 ), # AL=0x6b, AF=1 CF=0
+ ( 0xd1, 0x95 ), # AL=0x6b, AF=1 CF=1
+ ( 0x72, 0x14 ), # AL=0x6c, AF=0 CF=0
+ ( 0xd2, 0x95 ), # AL=0x6c, AF=0 CF=1
+ ( 0x72, 0x14 ), # AL=0x6c, AF=1 CF=0
+ ( 0xd2, 0x95 ), # AL=0x6c, AF=1 CF=1
+ ( 0x73, 0x10 ), # AL=0x6d, AF=0 CF=0
+ ( 0xd3, 0x91 ), # AL=0x6d, AF=0 CF=1
+ ( 0x73, 0x10 ), # AL=0x6d, AF=1 CF=0
+ ( 0xd3, 0x91 ), # AL=0x6d, AF=1 CF=1
+ ( 0x74, 0x14 ), # AL=0x6e, AF=0 CF=0
+ ( 0xd4, 0x95 ), # AL=0x6e, AF=0 CF=1
+ ( 0x74, 0x14 ), # AL=0x6e, AF=1 CF=0
+ ( 0xd4, 0x95 ), # AL=0x6e, AF=1 CF=1
+ ( 0x75, 0x10 ), # AL=0x6f, AF=0 CF=0
+ ( 0xd5, 0x91 ), # AL=0x6f, AF=0 CF=1
+ ( 0x75, 0x10 ), # AL=0x6f, AF=1 CF=0
+ ( 0xd5, 0x91 ), # AL=0x6f, AF=1 CF=1
+ ( 0x70, 0x00 ), # AL=0x70, AF=0 CF=0
+ ( 0xd0, 0x81 ), # AL=0x70, AF=0 CF=1
+ ( 0x76, 0x10 ), # AL=0x70, AF=1 CF=0
+ ( 0xd6, 0x91 ), # AL=0x70, AF=1 CF=1
+ ( 0x71, 0x04 ), # AL=0x71, AF=0 CF=0
+ ( 0xd1, 0x85 ), # AL=0x71, AF=0 CF=1
+ ( 0x77, 0x14 ), # AL=0x71, AF=1 CF=0
+ ( 0xd7, 0x95 ), # AL=0x71, AF=1 CF=1
+ ( 0x72, 0x04 ), # AL=0x72, AF=0 CF=0
+ ( 0xd2, 0x85 ), # AL=0x72, AF=0 CF=1
+ ( 0x78, 0x14 ), # AL=0x72, AF=1 CF=0
+ ( 0xd8, 0x95 ), # AL=0x72, AF=1 CF=1
+ ( 0x73, 0x00 ), # AL=0x73, AF=0 CF=0
+ ( 0xd3, 0x81 ), # AL=0x73, AF=0 CF=1
+ ( 0x79, 0x10 ), # AL=0x73, AF=1 CF=0
+ ( 0xd9, 0x91 ), # AL=0x73, AF=1 CF=1
+ ( 0x74, 0x04 ), # AL=0x74, AF=0 CF=0
+ ( 0xd4, 0x85 ), # AL=0x74, AF=0 CF=1
+ ( 0x7a, 0x10 ), # AL=0x74, AF=1 CF=0
+ ( 0xda, 0x91 ), # AL=0x74, AF=1 CF=1
+ ( 0x75, 0x00 ), # AL=0x75, AF=0 CF=0
+ ( 0xd5, 0x81 ), # AL=0x75, AF=0 CF=1
+ ( 0x7b, 0x14 ), # AL=0x75, AF=1 CF=0
+ ( 0xdb, 0x95 ), # AL=0x75, AF=1 CF=1
+ ( 0x76, 0x00 ), # AL=0x76, AF=0 CF=0
+ ( 0xd6, 0x81 ), # AL=0x76, AF=0 CF=1
+ ( 0x7c, 0x10 ), # AL=0x76, AF=1 CF=0
+ ( 0xdc, 0x91 ), # AL=0x76, AF=1 CF=1
+ ( 0x77, 0x04 ), # AL=0x77, AF=0 CF=0
+ ( 0xd7, 0x85 ), # AL=0x77, AF=0 CF=1
+ ( 0x7d, 0x14 ), # AL=0x77, AF=1 CF=0
+ ( 0xdd, 0x95 ), # AL=0x77, AF=1 CF=1
+ ( 0x78, 0x04 ), # AL=0x78, AF=0 CF=0
+ ( 0xd8, 0x85 ), # AL=0x78, AF=0 CF=1
+ ( 0x7e, 0x14 ), # AL=0x78, AF=1 CF=0
+ ( 0xde, 0x95 ), # AL=0x78, AF=1 CF=1
+ ( 0x79, 0x00 ), # AL=0x79, AF=0 CF=0
+ ( 0xd9, 0x81 ), # AL=0x79, AF=0 CF=1
+ ( 0x7f, 0x10 ), # AL=0x79, AF=1 CF=0
+ ( 0xdf, 0x91 ), # AL=0x79, AF=1 CF=1
+ ( 0x80, 0x90 ), # AL=0x7a, AF=0 CF=0
+ ( 0xe0, 0x91 ), # AL=0x7a, AF=0 CF=1
+ ( 0x80, 0x90 ), # AL=0x7a, AF=1 CF=0
+ ( 0xe0, 0x91 ), # AL=0x7a, AF=1 CF=1
+ ( 0x81, 0x94 ), # AL=0x7b, AF=0 CF=0
+ ( 0xe1, 0x95 ), # AL=0x7b, AF=0 CF=1
+ ( 0x81, 0x94 ), # AL=0x7b, AF=1 CF=0
+ ( 0xe1, 0x95 ), # AL=0x7b, AF=1 CF=1
+ ( 0x82, 0x94 ), # AL=0x7c, AF=0 CF=0
+ ( 0xe2, 0x95 ), # AL=0x7c, AF=0 CF=1
+ ( 0x82, 0x94 ), # AL=0x7c, AF=1 CF=0
+ ( 0xe2, 0x95 ), # AL=0x7c, AF=1 CF=1
+ ( 0x83, 0x90 ), # AL=0x7d, AF=0 CF=0
+ ( 0xe3, 0x91 ), # AL=0x7d, AF=0 CF=1
+ ( 0x83, 0x90 ), # AL=0x7d, AF=1 CF=0
+ ( 0xe3, 0x91 ), # AL=0x7d, AF=1 CF=1
+ ( 0x84, 0x94 ), # AL=0x7e, AF=0 CF=0
+ ( 0xe4, 0x95 ), # AL=0x7e, AF=0 CF=1
+ ( 0x84, 0x94 ), # AL=0x7e, AF=1 CF=0
+ ( 0xe4, 0x95 ), # AL=0x7e, AF=1 CF=1
+ ( 0x85, 0x90 ), # AL=0x7f, AF=0 CF=0
+ ( 0xe5, 0x91 ), # AL=0x7f, AF=0 CF=1
+ ( 0x85, 0x90 ), # AL=0x7f, AF=1 CF=0
+ ( 0xe5, 0x91 ), # AL=0x7f, AF=1 CF=1
+ ( 0x80, 0x80 ), # AL=0x80, AF=0 CF=0
+ ( 0xe0, 0x81 ), # AL=0x80, AF=0 CF=1
+ ( 0x86, 0x90 ), # AL=0x80, AF=1 CF=0
+ ( 0xe6, 0x91 ), # AL=0x80, AF=1 CF=1
+ ( 0x81, 0x84 ), # AL=0x81, AF=0 CF=0
+ ( 0xe1, 0x85 ), # AL=0x81, AF=0 CF=1
+ ( 0x87, 0x94 ), # AL=0x81, AF=1 CF=0
+ ( 0xe7, 0x95 ), # AL=0x81, AF=1 CF=1
+ ( 0x82, 0x84 ), # AL=0x82, AF=0 CF=0
+ ( 0xe2, 0x85 ), # AL=0x82, AF=0 CF=1
+ ( 0x88, 0x94 ), # AL=0x82, AF=1 CF=0
+ ( 0xe8, 0x95 ), # AL=0x82, AF=1 CF=1
+ ( 0x83, 0x80 ), # AL=0x83, AF=0 CF=0
+ ( 0xe3, 0x81 ), # AL=0x83, AF=0 CF=1
+ ( 0x89, 0x90 ), # AL=0x83, AF=1 CF=0
+ ( 0xe9, 0x91 ), # AL=0x83, AF=1 CF=1
+ ( 0x84, 0x84 ), # AL=0x84, AF=0 CF=0
+ ( 0xe4, 0x85 ), # AL=0x84, AF=0 CF=1
+ ( 0x8a, 0x90 ), # AL=0x84, AF=1 CF=0
+ ( 0xea, 0x91 ), # AL=0x84, AF=1 CF=1
+ ( 0x85, 0x80 ), # AL=0x85, AF=0 CF=0
+ ( 0xe5, 0x81 ), # AL=0x85, AF=0 CF=1
+ ( 0x8b, 0x94 ), # AL=0x85, AF=1 CF=0
+ ( 0xeb, 0x95 ), # AL=0x85, AF=1 CF=1
+ ( 0x86, 0x80 ), # AL=0x86, AF=0 CF=0
+ ( 0xe6, 0x81 ), # AL=0x86, AF=0 CF=1
+ ( 0x8c, 0x90 ), # AL=0x86, AF=1 CF=0
+ ( 0xec, 0x91 ), # AL=0x86, AF=1 CF=1
+ ( 0x87, 0x84 ), # AL=0x87, AF=0 CF=0
+ ( 0xe7, 0x85 ), # AL=0x87, AF=0 CF=1
+ ( 0x8d, 0x94 ), # AL=0x87, AF=1 CF=0
+ ( 0xed, 0x95 ), # AL=0x87, AF=1 CF=1
+ ( 0x88, 0x84 ), # AL=0x88, AF=0 CF=0
+ ( 0xe8, 0x85 ), # AL=0x88, AF=0 CF=1
+ ( 0x8e, 0x94 ), # AL=0x88, AF=1 CF=0
+ ( 0xee, 0x95 ), # AL=0x88, AF=1 CF=1
+ ( 0x89, 0x80 ), # AL=0x89, AF=0 CF=0
+ ( 0xe9, 0x81 ), # AL=0x89, AF=0 CF=1
+ ( 0x8f, 0x90 ), # AL=0x89, AF=1 CF=0
+ ( 0xef, 0x91 ), # AL=0x89, AF=1 CF=1
+ ( 0x90, 0x94 ), # AL=0x8a, AF=0 CF=0
+ ( 0xf0, 0x95 ), # AL=0x8a, AF=0 CF=1
+ ( 0x90, 0x94 ), # AL=0x8a, AF=1 CF=0
+ ( 0xf0, 0x95 ), # AL=0x8a, AF=1 CF=1
+ ( 0x91, 0x90 ), # AL=0x8b, AF=0 CF=0
+ ( 0xf1, 0x91 ), # AL=0x8b, AF=0 CF=1
+ ( 0x91, 0x90 ), # AL=0x8b, AF=1 CF=0
+ ( 0xf1, 0x91 ), # AL=0x8b, AF=1 CF=1
+ ( 0x92, 0x90 ), # AL=0x8c, AF=0 CF=0
+ ( 0xf2, 0x91 ), # AL=0x8c, AF=0 CF=1
+ ( 0x92, 0x90 ), # AL=0x8c, AF=1 CF=0
+ ( 0xf2, 0x91 ), # AL=0x8c, AF=1 CF=1
+ ( 0x93, 0x94 ), # AL=0x8d, AF=0 CF=0
+ ( 0xf3, 0x95 ), # AL=0x8d, AF=0 CF=1
+ ( 0x93, 0x94 ), # AL=0x8d, AF=1 CF=0
+ ( 0xf3, 0x95 ), # AL=0x8d, AF=1 CF=1
+ ( 0x94, 0x90 ), # AL=0x8e, AF=0 CF=0
+ ( 0xf4, 0x91 ), # AL=0x8e, AF=0 CF=1
+ ( 0x94, 0x90 ), # AL=0x8e, AF=1 CF=0
+ ( 0xf4, 0x91 ), # AL=0x8e, AF=1 CF=1
+ ( 0x95, 0x94 ), # AL=0x8f, AF=0 CF=0
+ ( 0xf5, 0x95 ), # AL=0x8f, AF=0 CF=1
+ ( 0x95, 0x94 ), # AL=0x8f, AF=1 CF=0
+ ( 0xf5, 0x95 ), # AL=0x8f, AF=1 CF=1
+ ( 0x90, 0x84 ), # AL=0x90, AF=0 CF=0
+ ( 0xf0, 0x85 ), # AL=0x90, AF=0 CF=1
+ ( 0x96, 0x94 ), # AL=0x90, AF=1 CF=0
+ ( 0xf6, 0x95 ), # AL=0x90, AF=1 CF=1
+ ( 0x91, 0x80 ), # AL=0x91, AF=0 CF=0
+ ( 0xf1, 0x81 ), # AL=0x91, AF=0 CF=1
+ ( 0x97, 0x90 ), # AL=0x91, AF=1 CF=0
+ ( 0xf7, 0x91 ), # AL=0x91, AF=1 CF=1
+ ( 0x92, 0x80 ), # AL=0x92, AF=0 CF=0
+ ( 0xf2, 0x81 ), # AL=0x92, AF=0 CF=1
+ ( 0x98, 0x90 ), # AL=0x92, AF=1 CF=0
+ ( 0xf8, 0x91 ), # AL=0x92, AF=1 CF=1
+ ( 0x93, 0x84 ), # AL=0x93, AF=0 CF=0
+ ( 0xf3, 0x85 ), # AL=0x93, AF=0 CF=1
+ ( 0x99, 0x94 ), # AL=0x93, AF=1 CF=0
+ ( 0xf9, 0x95 ), # AL=0x93, AF=1 CF=1
+ ( 0x94, 0x80 ), # AL=0x94, AF=0 CF=0
+ ( 0xf4, 0x81 ), # AL=0x94, AF=0 CF=1
+ ( 0x9a, 0x94 ), # AL=0x94, AF=1 CF=0
+ ( 0xfa, 0x95 ), # AL=0x94, AF=1 CF=1
+ ( 0x95, 0x84 ), # AL=0x95, AF=0 CF=0
+ ( 0xf5, 0x85 ), # AL=0x95, AF=0 CF=1
+ ( 0x9b, 0x90 ), # AL=0x95, AF=1 CF=0
+ ( 0xfb, 0x91 ), # AL=0x95, AF=1 CF=1
+ ( 0x96, 0x84 ), # AL=0x96, AF=0 CF=0
+ ( 0xf6, 0x85 ), # AL=0x96, AF=0 CF=1
+ ( 0x9c, 0x94 ), # AL=0x96, AF=1 CF=0
+ ( 0xfc, 0x95 ), # AL=0x96, AF=1 CF=1
+ ( 0x97, 0x80 ), # AL=0x97, AF=0 CF=0
+ ( 0xf7, 0x81 ), # AL=0x97, AF=0 CF=1
+ ( 0x9d, 0x90 ), # AL=0x97, AF=1 CF=0
+ ( 0xfd, 0x91 ), # AL=0x97, AF=1 CF=1
+ ( 0x98, 0x80 ), # AL=0x98, AF=0 CF=0
+ ( 0xf8, 0x81 ), # AL=0x98, AF=0 CF=1
+ ( 0x9e, 0x90 ), # AL=0x98, AF=1 CF=0
+ ( 0xfe, 0x91 ), # AL=0x98, AF=1 CF=1
+ ( 0x99, 0x84 ), # AL=0x99, AF=0 CF=0
+ ( 0xf9, 0x85 ), # AL=0x99, AF=0 CF=1
+ ( 0x9f, 0x94 ), # AL=0x99, AF=1 CF=0
+ ( 0xff, 0x95 ), # AL=0x99, AF=1 CF=1
+ ( 0x00, 0x55 ), # AL=0x9a, AF=0 CF=0
+ ( 0x00, 0x55 ), # AL=0x9a, AF=0 CF=1
+ ( 0x00, 0x55 ), # AL=0x9a, AF=1 CF=0
+ ( 0x00, 0x55 ), # AL=0x9a, AF=1 CF=1
+ ( 0x01, 0x11 ), # AL=0x9b, AF=0 CF=0
+ ( 0x01, 0x11 ), # AL=0x9b, AF=0 CF=1
+ ( 0x01, 0x11 ), # AL=0x9b, AF=1 CF=0
+ ( 0x01, 0x11 ), # AL=0x9b, AF=1 CF=1
+ ( 0x02, 0x11 ), # AL=0x9c, AF=0 CF=0
+ ( 0x02, 0x11 ), # AL=0x9c, AF=0 CF=1
+ ( 0x02, 0x11 ), # AL=0x9c, AF=1 CF=0
+ ( 0x02, 0x11 ), # AL=0x9c, AF=1 CF=1
+ ( 0x03, 0x15 ), # AL=0x9d, AF=0 CF=0
+ ( 0x03, 0x15 ), # AL=0x9d, AF=0 CF=1
+ ( 0x03, 0x15 ), # AL=0x9d, AF=1 CF=0
+ ( 0x03, 0x15 ), # AL=0x9d, AF=1 CF=1
+ ( 0x04, 0x11 ), # AL=0x9e, AF=0 CF=0
+ ( 0x04, 0x11 ), # AL=0x9e, AF=0 CF=1
+ ( 0x04, 0x11 ), # AL=0x9e, AF=1 CF=0
+ ( 0x04, 0x11 ), # AL=0x9e, AF=1 CF=1
+ ( 0x05, 0x15 ), # AL=0x9f, AF=0 CF=0
+ ( 0x05, 0x15 ), # AL=0x9f, AF=0 CF=1
+ ( 0x05, 0x15 ), # AL=0x9f, AF=1 CF=0
+ ( 0x05, 0x15 ), # AL=0x9f, AF=1 CF=1
+ ( 0x00, 0x45 ), # AL=0xa0, AF=0 CF=0
+ ( 0x00, 0x45 ), # AL=0xa0, AF=0 CF=1
+ ( 0x06, 0x15 ), # AL=0xa0, AF=1 CF=0
+ ( 0x06, 0x15 ), # AL=0xa0, AF=1 CF=1
+ ( 0x01, 0x01 ), # AL=0xa1, AF=0 CF=0
+ ( 0x01, 0x01 ), # AL=0xa1, AF=0 CF=1
+ ( 0x07, 0x11 ), # AL=0xa1, AF=1 CF=0
+ ( 0x07, 0x11 ), # AL=0xa1, AF=1 CF=1
+ ( 0x02, 0x01 ), # AL=0xa2, AF=0 CF=0
+ ( 0x02, 0x01 ), # AL=0xa2, AF=0 CF=1
+ ( 0x08, 0x11 ), # AL=0xa2, AF=1 CF=0
+ ( 0x08, 0x11 ), # AL=0xa2, AF=1 CF=1
+ ( 0x03, 0x05 ), # AL=0xa3, AF=0 CF=0
+ ( 0x03, 0x05 ), # AL=0xa3, AF=0 CF=1
+ ( 0x09, 0x15 ), # AL=0xa3, AF=1 CF=0
+ ( 0x09, 0x15 ), # AL=0xa3, AF=1 CF=1
+ ( 0x04, 0x01 ), # AL=0xa4, AF=0 CF=0
+ ( 0x04, 0x01 ), # AL=0xa4, AF=0 CF=1
+ ( 0x0a, 0x15 ), # AL=0xa4, AF=1 CF=0
+ ( 0x0a, 0x15 ), # AL=0xa4, AF=1 CF=1
+ ( 0x05, 0x05 ), # AL=0xa5, AF=0 CF=0
+ ( 0x05, 0x05 ), # AL=0xa5, AF=0 CF=1
+ ( 0x0b, 0x11 ), # AL=0xa5, AF=1 CF=0
+ ( 0x0b, 0x11 ), # AL=0xa5, AF=1 CF=1
+ ( 0x06, 0x05 ), # AL=0xa6, AF=0 CF=0
+ ( 0x06, 0x05 ), # AL=0xa6, AF=0 CF=1
+ ( 0x0c, 0x15 ), # AL=0xa6, AF=1 CF=0
+ ( 0x0c, 0x15 ), # AL=0xa6, AF=1 CF=1
+ ( 0x07, 0x01 ), # AL=0xa7, AF=0 CF=0
+ ( 0x07, 0x01 ), # AL=0xa7, AF=0 CF=1
+ ( 0x0d, 0x11 ), # AL=0xa7, AF=1 CF=0
+ ( 0x0d, 0x11 ), # AL=0xa7, AF=1 CF=1
+ ( 0x08, 0x01 ), # AL=0xa8, AF=0 CF=0
+ ( 0x08, 0x01 ), # AL=0xa8, AF=0 CF=1
+ ( 0x0e, 0x11 ), # AL=0xa8, AF=1 CF=0
+ ( 0x0e, 0x11 ), # AL=0xa8, AF=1 CF=1
+ ( 0x09, 0x05 ), # AL=0xa9, AF=0 CF=0
+ ( 0x09, 0x05 ), # AL=0xa9, AF=0 CF=1
+ ( 0x0f, 0x15 ), # AL=0xa9, AF=1 CF=0
+ ( 0x0f, 0x15 ), # AL=0xa9, AF=1 CF=1
+ ( 0x10, 0x11 ), # AL=0xaa, AF=0 CF=0
+ ( 0x10, 0x11 ), # AL=0xaa, AF=0 CF=1
+ ( 0x10, 0x11 ), # AL=0xaa, AF=1 CF=0
+ ( 0x10, 0x11 ), # AL=0xaa, AF=1 CF=1
+ ( 0x11, 0x15 ), # AL=0xab, AF=0 CF=0
+ ( 0x11, 0x15 ), # AL=0xab, AF=0 CF=1
+ ( 0x11, 0x15 ), # AL=0xab, AF=1 CF=0
+ ( 0x11, 0x15 ), # AL=0xab, AF=1 CF=1
+ ( 0x12, 0x15 ), # AL=0xac, AF=0 CF=0
+ ( 0x12, 0x15 ), # AL=0xac, AF=0 CF=1
+ ( 0x12, 0x15 ), # AL=0xac, AF=1 CF=0
+ ( 0x12, 0x15 ), # AL=0xac, AF=1 CF=1
+ ( 0x13, 0x11 ), # AL=0xad, AF=0 CF=0
+ ( 0x13, 0x11 ), # AL=0xad, AF=0 CF=1
+ ( 0x13, 0x11 ), # AL=0xad, AF=1 CF=0
+ ( 0x13, 0x11 ), # AL=0xad, AF=1 CF=1
+ ( 0x14, 0x15 ), # AL=0xae, AF=0 CF=0
+ ( 0x14, 0x15 ), # AL=0xae, AF=0 CF=1
+ ( 0x14, 0x15 ), # AL=0xae, AF=1 CF=0
+ ( 0x14, 0x15 ), # AL=0xae, AF=1 CF=1
+ ( 0x15, 0x11 ), # AL=0xaf, AF=0 CF=0
+ ( 0x15, 0x11 ), # AL=0xaf, AF=0 CF=1
+ ( 0x15, 0x11 ), # AL=0xaf, AF=1 CF=0
+ ( 0x15, 0x11 ), # AL=0xaf, AF=1 CF=1
+ ( 0x10, 0x01 ), # AL=0xb0, AF=0 CF=0
+ ( 0x10, 0x01 ), # AL=0xb0, AF=0 CF=1
+ ( 0x16, 0x11 ), # AL=0xb0, AF=1 CF=0
+ ( 0x16, 0x11 ), # AL=0xb0, AF=1 CF=1
+ ( 0x11, 0x05 ), # AL=0xb1, AF=0 CF=0
+ ( 0x11, 0x05 ), # AL=0xb1, AF=0 CF=1
+ ( 0x17, 0x15 ), # AL=0xb1, AF=1 CF=0
+ ( 0x17, 0x15 ), # AL=0xb1, AF=1 CF=1
+ ( 0x12, 0x05 ), # AL=0xb2, AF=0 CF=0
+ ( 0x12, 0x05 ), # AL=0xb2, AF=0 CF=1
+ ( 0x18, 0x15 ), # AL=0xb2, AF=1 CF=0
+ ( 0x18, 0x15 ), # AL=0xb2, AF=1 CF=1
+ ( 0x13, 0x01 ), # AL=0xb3, AF=0 CF=0
+ ( 0x13, 0x01 ), # AL=0xb3, AF=0 CF=1
+ ( 0x19, 0x11 ), # AL=0xb3, AF=1 CF=0
+ ( 0x19, 0x11 ), # AL=0xb3, AF=1 CF=1
+ ( 0x14, 0x05 ), # AL=0xb4, AF=0 CF=0
+ ( 0x14, 0x05 ), # AL=0xb4, AF=0 CF=1
+ ( 0x1a, 0x11 ), # AL=0xb4, AF=1 CF=0
+ ( 0x1a, 0x11 ), # AL=0xb4, AF=1 CF=1
+ ( 0x15, 0x01 ), # AL=0xb5, AF=0 CF=0
+ ( 0x15, 0x01 ), # AL=0xb5, AF=0 CF=1
+ ( 0x1b, 0x15 ), # AL=0xb5, AF=1 CF=0
+ ( 0x1b, 0x15 ), # AL=0xb5, AF=1 CF=1
+ ( 0x16, 0x01 ), # AL=0xb6, AF=0 CF=0
+ ( 0x16, 0x01 ), # AL=0xb6, AF=0 CF=1
+ ( 0x1c, 0x11 ), # AL=0xb6, AF=1 CF=0
+ ( 0x1c, 0x11 ), # AL=0xb6, AF=1 CF=1
+ ( 0x17, 0x05 ), # AL=0xb7, AF=0 CF=0
+ ( 0x17, 0x05 ), # AL=0xb7, AF=0 CF=1
+ ( 0x1d, 0x15 ), # AL=0xb7, AF=1 CF=0
+ ( 0x1d, 0x15 ), # AL=0xb7, AF=1 CF=1
+ ( 0x18, 0x05 ), # AL=0xb8, AF=0 CF=0
+ ( 0x18, 0x05 ), # AL=0xb8, AF=0 CF=1
+ ( 0x1e, 0x15 ), # AL=0xb8, AF=1 CF=0
+ ( 0x1e, 0x15 ), # AL=0xb8, AF=1 CF=1
+ ( 0x19, 0x01 ), # AL=0xb9, AF=0 CF=0
+ ( 0x19, 0x01 ), # AL=0xb9, AF=0 CF=1
+ ( 0x1f, 0x11 ), # AL=0xb9, AF=1 CF=0
+ ( 0x1f, 0x11 ), # AL=0xb9, AF=1 CF=1
+ ( 0x20, 0x11 ), # AL=0xba, AF=0 CF=0
+ ( 0x20, 0x11 ), # AL=0xba, AF=0 CF=1
+ ( 0x20, 0x11 ), # AL=0xba, AF=1 CF=0
+ ( 0x20, 0x11 ), # AL=0xba, AF=1 CF=1
+ ( 0x21, 0x15 ), # AL=0xbb, AF=0 CF=0
+ ( 0x21, 0x15 ), # AL=0xbb, AF=0 CF=1
+ ( 0x21, 0x15 ), # AL=0xbb, AF=1 CF=0
+ ( 0x21, 0x15 ), # AL=0xbb, AF=1 CF=1
+ ( 0x22, 0x15 ), # AL=0xbc, AF=0 CF=0
+ ( 0x22, 0x15 ), # AL=0xbc, AF=0 CF=1
+ ( 0x22, 0x15 ), # AL=0xbc, AF=1 CF=0
+ ( 0x22, 0x15 ), # AL=0xbc, AF=1 CF=1
+ ( 0x23, 0x11 ), # AL=0xbd, AF=0 CF=0
+ ( 0x23, 0x11 ), # AL=0xbd, AF=0 CF=1
+ ( 0x23, 0x11 ), # AL=0xbd, AF=1 CF=0
+ ( 0x23, 0x11 ), # AL=0xbd, AF=1 CF=1
+ ( 0x24, 0x15 ), # AL=0xbe, AF=0 CF=0
+ ( 0x24, 0x15 ), # AL=0xbe, AF=0 CF=1
+ ( 0x24, 0x15 ), # AL=0xbe, AF=1 CF=0
+ ( 0x24, 0x15 ), # AL=0xbe, AF=1 CF=1
+ ( 0x25, 0x11 ), # AL=0xbf, AF=0 CF=0
+ ( 0x25, 0x11 ), # AL=0xbf, AF=0 CF=1
+ ( 0x25, 0x11 ), # AL=0xbf, AF=1 CF=0
+ ( 0x25, 0x11 ), # AL=0xbf, AF=1 CF=1
+ ( 0x20, 0x01 ), # AL=0xc0, AF=0 CF=0
+ ( 0x20, 0x01 ), # AL=0xc0, AF=0 CF=1
+ ( 0x26, 0x11 ), # AL=0xc0, AF=1 CF=0
+ ( 0x26, 0x11 ), # AL=0xc0, AF=1 CF=1
+ ( 0x21, 0x05 ), # AL=0xc1, AF=0 CF=0
+ ( 0x21, 0x05 ), # AL=0xc1, AF=0 CF=1
+ ( 0x27, 0x15 ), # AL=0xc1, AF=1 CF=0
+ ( 0x27, 0x15 ), # AL=0xc1, AF=1 CF=1
+ ( 0x22, 0x05 ), # AL=0xc2, AF=0 CF=0
+ ( 0x22, 0x05 ), # AL=0xc2, AF=0 CF=1
+ ( 0x28, 0x15 ), # AL=0xc2, AF=1 CF=0
+ ( 0x28, 0x15 ), # AL=0xc2, AF=1 CF=1
+ ( 0x23, 0x01 ), # AL=0xc3, AF=0 CF=0
+ ( 0x23, 0x01 ), # AL=0xc3, AF=0 CF=1
+ ( 0x29, 0x11 ), # AL=0xc3, AF=1 CF=0
+ ( 0x29, 0x11 ), # AL=0xc3, AF=1 CF=1
+ ( 0x24, 0x05 ), # AL=0xc4, AF=0 CF=0
+ ( 0x24, 0x05 ), # AL=0xc4, AF=0 CF=1
+ ( 0x2a, 0x11 ), # AL=0xc4, AF=1 CF=0
+ ( 0x2a, 0x11 ), # AL=0xc4, AF=1 CF=1
+ ( 0x25, 0x01 ), # AL=0xc5, AF=0 CF=0
+ ( 0x25, 0x01 ), # AL=0xc5, AF=0 CF=1
+ ( 0x2b, 0x15 ), # AL=0xc5, AF=1 CF=0
+ ( 0x2b, 0x15 ), # AL=0xc5, AF=1 CF=1
+ ( 0x26, 0x01 ), # AL=0xc6, AF=0 CF=0
+ ( 0x26, 0x01 ), # AL=0xc6, AF=0 CF=1
+ ( 0x2c, 0x11 ), # AL=0xc6, AF=1 CF=0
+ ( 0x2c, 0x11 ), # AL=0xc6, AF=1 CF=1
+ ( 0x27, 0x05 ), # AL=0xc7, AF=0 CF=0
+ ( 0x27, 0x05 ), # AL=0xc7, AF=0 CF=1
+ ( 0x2d, 0x15 ), # AL=0xc7, AF=1 CF=0
+ ( 0x2d, 0x15 ), # AL=0xc7, AF=1 CF=1
+ ( 0x28, 0x05 ), # AL=0xc8, AF=0 CF=0
+ ( 0x28, 0x05 ), # AL=0xc8, AF=0 CF=1
+ ( 0x2e, 0x15 ), # AL=0xc8, AF=1 CF=0
+ ( 0x2e, 0x15 ), # AL=0xc8, AF=1 CF=1
+ ( 0x29, 0x01 ), # AL=0xc9, AF=0 CF=0
+ ( 0x29, 0x01 ), # AL=0xc9, AF=0 CF=1
+ ( 0x2f, 0x11 ), # AL=0xc9, AF=1 CF=0
+ ( 0x2f, 0x11 ), # AL=0xc9, AF=1 CF=1
+ ( 0x30, 0x15 ), # AL=0xca, AF=0 CF=0
+ ( 0x30, 0x15 ), # AL=0xca, AF=0 CF=1
+ ( 0x30, 0x15 ), # AL=0xca, AF=1 CF=0
+ ( 0x30, 0x15 ), # AL=0xca, AF=1 CF=1
+ ( 0x31, 0x11 ), # AL=0xcb, AF=0 CF=0
+ ( 0x31, 0x11 ), # AL=0xcb, AF=0 CF=1
+ ( 0x31, 0x11 ), # AL=0xcb, AF=1 CF=0
+ ( 0x31, 0x11 ), # AL=0xcb, AF=1 CF=1
+ ( 0x32, 0x11 ), # AL=0xcc, AF=0 CF=0
+ ( 0x32, 0x11 ), # AL=0xcc, AF=0 CF=1
+ ( 0x32, 0x11 ), # AL=0xcc, AF=1 CF=0
+ ( 0x32, 0x11 ), # AL=0xcc, AF=1 CF=1
+ ( 0x33, 0x15 ), # AL=0xcd, AF=0 CF=0
+ ( 0x33, 0x15 ), # AL=0xcd, AF=0 CF=1
+ ( 0x33, 0x15 ), # AL=0xcd, AF=1 CF=0
+ ( 0x33, 0x15 ), # AL=0xcd, AF=1 CF=1
+ ( 0x34, 0x11 ), # AL=0xce, AF=0 CF=0
+ ( 0x34, 0x11 ), # AL=0xce, AF=0 CF=1
+ ( 0x34, 0x11 ), # AL=0xce, AF=1 CF=0
+ ( 0x34, 0x11 ), # AL=0xce, AF=1 CF=1
+ ( 0x35, 0x15 ), # AL=0xcf, AF=0 CF=0
+ ( 0x35, 0x15 ), # AL=0xcf, AF=0 CF=1
+ ( 0x35, 0x15 ), # AL=0xcf, AF=1 CF=0
+ ( 0x35, 0x15 ), # AL=0xcf, AF=1 CF=1
+ ( 0x30, 0x05 ), # AL=0xd0, AF=0 CF=0
+ ( 0x30, 0x05 ), # AL=0xd0, AF=0 CF=1
+ ( 0x36, 0x15 ), # AL=0xd0, AF=1 CF=0
+ ( 0x36, 0x15 ), # AL=0xd0, AF=1 CF=1
+ ( 0x31, 0x01 ), # AL=0xd1, AF=0 CF=0
+ ( 0x31, 0x01 ), # AL=0xd1, AF=0 CF=1
+ ( 0x37, 0x11 ), # AL=0xd1, AF=1 CF=0
+ ( 0x37, 0x11 ), # AL=0xd1, AF=1 CF=1
+ ( 0x32, 0x01 ), # AL=0xd2, AF=0 CF=0
+ ( 0x32, 0x01 ), # AL=0xd2, AF=0 CF=1
+ ( 0x38, 0x11 ), # AL=0xd2, AF=1 CF=0
+ ( 0x38, 0x11 ), # AL=0xd2, AF=1 CF=1
+ ( 0x33, 0x05 ), # AL=0xd3, AF=0 CF=0
+ ( 0x33, 0x05 ), # AL=0xd3, AF=0 CF=1
+ ( 0x39, 0x15 ), # AL=0xd3, AF=1 CF=0
+ ( 0x39, 0x15 ), # AL=0xd3, AF=1 CF=1
+ ( 0x34, 0x01 ), # AL=0xd4, AF=0 CF=0
+ ( 0x34, 0x01 ), # AL=0xd4, AF=0 CF=1
+ ( 0x3a, 0x15 ), # AL=0xd4, AF=1 CF=0
+ ( 0x3a, 0x15 ), # AL=0xd4, AF=1 CF=1
+ ( 0x35, 0x05 ), # AL=0xd5, AF=0 CF=0
+ ( 0x35, 0x05 ), # AL=0xd5, AF=0 CF=1
+ ( 0x3b, 0x11 ), # AL=0xd5, AF=1 CF=0
+ ( 0x3b, 0x11 ), # AL=0xd5, AF=1 CF=1
+ ( 0x36, 0x05 ), # AL=0xd6, AF=0 CF=0
+ ( 0x36, 0x05 ), # AL=0xd6, AF=0 CF=1
+ ( 0x3c, 0x15 ), # AL=0xd6, AF=1 CF=0
+ ( 0x3c, 0x15 ), # AL=0xd6, AF=1 CF=1
+ ( 0x37, 0x01 ), # AL=0xd7, AF=0 CF=0
+ ( 0x37, 0x01 ), # AL=0xd7, AF=0 CF=1
+ ( 0x3d, 0x11 ), # AL=0xd7, AF=1 CF=0
+ ( 0x3d, 0x11 ), # AL=0xd7, AF=1 CF=1
+ ( 0x38, 0x01 ), # AL=0xd8, AF=0 CF=0
+ ( 0x38, 0x01 ), # AL=0xd8, AF=0 CF=1
+ ( 0x3e, 0x11 ), # AL=0xd8, AF=1 CF=0
+ ( 0x3e, 0x11 ), # AL=0xd8, AF=1 CF=1
+ ( 0x39, 0x05 ), # AL=0xd9, AF=0 CF=0
+ ( 0x39, 0x05 ), # AL=0xd9, AF=0 CF=1
+ ( 0x3f, 0x15 ), # AL=0xd9, AF=1 CF=0
+ ( 0x3f, 0x15 ), # AL=0xd9, AF=1 CF=1
+ ( 0x40, 0x11 ), # AL=0xda, AF=0 CF=0
+ ( 0x40, 0x11 ), # AL=0xda, AF=0 CF=1
+ ( 0x40, 0x11 ), # AL=0xda, AF=1 CF=0
+ ( 0x40, 0x11 ), # AL=0xda, AF=1 CF=1
+ ( 0x41, 0x15 ), # AL=0xdb, AF=0 CF=0
+ ( 0x41, 0x15 ), # AL=0xdb, AF=0 CF=1
+ ( 0x41, 0x15 ), # AL=0xdb, AF=1 CF=0
+ ( 0x41, 0x15 ), # AL=0xdb, AF=1 CF=1
+ ( 0x42, 0x15 ), # AL=0xdc, AF=0 CF=0
+ ( 0x42, 0x15 ), # AL=0xdc, AF=0 CF=1
+ ( 0x42, 0x15 ), # AL=0xdc, AF=1 CF=0
+ ( 0x42, 0x15 ), # AL=0xdc, AF=1 CF=1
+ ( 0x43, 0x11 ), # AL=0xdd, AF=0 CF=0
+ ( 0x43, 0x11 ), # AL=0xdd, AF=0 CF=1
+ ( 0x43, 0x11 ), # AL=0xdd, AF=1 CF=0
+ ( 0x43, 0x11 ), # AL=0xdd, AF=1 CF=1
+ ( 0x44, 0x15 ), # AL=0xde, AF=0 CF=0
+ ( 0x44, 0x15 ), # AL=0xde, AF=0 CF=1
+ ( 0x44, 0x15 ), # AL=0xde, AF=1 CF=0
+ ( 0x44, 0x15 ), # AL=0xde, AF=1 CF=1
+ ( 0x45, 0x11 ), # AL=0xdf, AF=0 CF=0
+ ( 0x45, 0x11 ), # AL=0xdf, AF=0 CF=1
+ ( 0x45, 0x11 ), # AL=0xdf, AF=1 CF=0
+ ( 0x45, 0x11 ), # AL=0xdf, AF=1 CF=1
+ ( 0x40, 0x01 ), # AL=0xe0, AF=0 CF=0
+ ( 0x40, 0x01 ), # AL=0xe0, AF=0 CF=1
+ ( 0x46, 0x11 ), # AL=0xe0, AF=1 CF=0
+ ( 0x46, 0x11 ), # AL=0xe0, AF=1 CF=1
+ ( 0x41, 0x05 ), # AL=0xe1, AF=0 CF=0
+ ( 0x41, 0x05 ), # AL=0xe1, AF=0 CF=1
+ ( 0x47, 0x15 ), # AL=0xe1, AF=1 CF=0
+ ( 0x47, 0x15 ), # AL=0xe1, AF=1 CF=1
+ ( 0x42, 0x05 ), # AL=0xe2, AF=0 CF=0
+ ( 0x42, 0x05 ), # AL=0xe2, AF=0 CF=1
+ ( 0x48, 0x15 ), # AL=0xe2, AF=1 CF=0
+ ( 0x48, 0x15 ), # AL=0xe2, AF=1 CF=1
+ ( 0x43, 0x01 ), # AL=0xe3, AF=0 CF=0
+ ( 0x43, 0x01 ), # AL=0xe3, AF=0 CF=1
+ ( 0x49, 0x11 ), # AL=0xe3, AF=1 CF=0
+ ( 0x49, 0x11 ), # AL=0xe3, AF=1 CF=1
+ ( 0x44, 0x05 ), # AL=0xe4, AF=0 CF=0
+ ( 0x44, 0x05 ), # AL=0xe4, AF=0 CF=1
+ ( 0x4a, 0x11 ), # AL=0xe4, AF=1 CF=0
+ ( 0x4a, 0x11 ), # AL=0xe4, AF=1 CF=1
+ ( 0x45, 0x01 ), # AL=0xe5, AF=0 CF=0
+ ( 0x45, 0x01 ), # AL=0xe5, AF=0 CF=1
+ ( 0x4b, 0x15 ), # AL=0xe5, AF=1 CF=0
+ ( 0x4b, 0x15 ), # AL=0xe5, AF=1 CF=1
+ ( 0x46, 0x01 ), # AL=0xe6, AF=0 CF=0
+ ( 0x46, 0x01 ), # AL=0xe6, AF=0 CF=1
+ ( 0x4c, 0x11 ), # AL=0xe6, AF=1 CF=0
+ ( 0x4c, 0x11 ), # AL=0xe6, AF=1 CF=1
+ ( 0x47, 0x05 ), # AL=0xe7, AF=0 CF=0
+ ( 0x47, 0x05 ), # AL=0xe7, AF=0 CF=1
+ ( 0x4d, 0x15 ), # AL=0xe7, AF=1 CF=0
+ ( 0x4d, 0x15 ), # AL=0xe7, AF=1 CF=1
+ ( 0x48, 0x05 ), # AL=0xe8, AF=0 CF=0
+ ( 0x48, 0x05 ), # AL=0xe8, AF=0 CF=1
+ ( 0x4e, 0x15 ), # AL=0xe8, AF=1 CF=0
+ ( 0x4e, 0x15 ), # AL=0xe8, AF=1 CF=1
+ ( 0x49, 0x01 ), # AL=0xe9, AF=0 CF=0
+ ( 0x49, 0x01 ), # AL=0xe9, AF=0 CF=1
+ ( 0x4f, 0x11 ), # AL=0xe9, AF=1 CF=0
+ ( 0x4f, 0x11 ), # AL=0xe9, AF=1 CF=1
+ ( 0x50, 0x15 ), # AL=0xea, AF=0 CF=0
+ ( 0x50, 0x15 ), # AL=0xea, AF=0 CF=1
+ ( 0x50, 0x15 ), # AL=0xea, AF=1 CF=0
+ ( 0x50, 0x15 ), # AL=0xea, AF=1 CF=1
+ ( 0x51, 0x11 ), # AL=0xeb, AF=0 CF=0
+ ( 0x51, 0x11 ), # AL=0xeb, AF=0 CF=1
+ ( 0x51, 0x11 ), # AL=0xeb, AF=1 CF=0
+ ( 0x51, 0x11 ), # AL=0xeb, AF=1 CF=1
+ ( 0x52, 0x11 ), # AL=0xec, AF=0 CF=0
+ ( 0x52, 0x11 ), # AL=0xec, AF=0 CF=1
+ ( 0x52, 0x11 ), # AL=0xec, AF=1 CF=0
+ ( 0x52, 0x11 ), # AL=0xec, AF=1 CF=1
+ ( 0x53, 0x15 ), # AL=0xed, AF=0 CF=0
+ ( 0x53, 0x15 ), # AL=0xed, AF=0 CF=1
+ ( 0x53, 0x15 ), # AL=0xed, AF=1 CF=0
+ ( 0x53, 0x15 ), # AL=0xed, AF=1 CF=1
+ ( 0x54, 0x11 ), # AL=0xee, AF=0 CF=0
+ ( 0x54, 0x11 ), # AL=0xee, AF=0 CF=1
+ ( 0x54, 0x11 ), # AL=0xee, AF=1 CF=0
+ ( 0x54, 0x11 ), # AL=0xee, AF=1 CF=1
+ ( 0x55, 0x15 ), # AL=0xef, AF=0 CF=0
+ ( 0x55, 0x15 ), # AL=0xef, AF=0 CF=1
+ ( 0x55, 0x15 ), # AL=0xef, AF=1 CF=0
+ ( 0x55, 0x15 ), # AL=0xef, AF=1 CF=1
+ ( 0x50, 0x05 ), # AL=0xf0, AF=0 CF=0
+ ( 0x50, 0x05 ), # AL=0xf0, AF=0 CF=1
+ ( 0x56, 0x15 ), # AL=0xf0, AF=1 CF=0
+ ( 0x56, 0x15 ), # AL=0xf0, AF=1 CF=1
+ ( 0x51, 0x01 ), # AL=0xf1, AF=0 CF=0
+ ( 0x51, 0x01 ), # AL=0xf1, AF=0 CF=1
+ ( 0x57, 0x11 ), # AL=0xf1, AF=1 CF=0
+ ( 0x57, 0x11 ), # AL=0xf1, AF=1 CF=1
+ ( 0x52, 0x01 ), # AL=0xf2, AF=0 CF=0
+ ( 0x52, 0x01 ), # AL=0xf2, AF=0 CF=1
+ ( 0x58, 0x11 ), # AL=0xf2, AF=1 CF=0
+ ( 0x58, 0x11 ), # AL=0xf2, AF=1 CF=1
+ ( 0x53, 0x05 ), # AL=0xf3, AF=0 CF=0
+ ( 0x53, 0x05 ), # AL=0xf3, AF=0 CF=1
+ ( 0x59, 0x15 ), # AL=0xf3, AF=1 CF=0
+ ( 0x59, 0x15 ), # AL=0xf3, AF=1 CF=1
+ ( 0x54, 0x01 ), # AL=0xf4, AF=0 CF=0
+ ( 0x54, 0x01 ), # AL=0xf4, AF=0 CF=1
+ ( 0x5a, 0x15 ), # AL=0xf4, AF=1 CF=0
+ ( 0x5a, 0x15 ), # AL=0xf4, AF=1 CF=1
+ ( 0x55, 0x05 ), # AL=0xf5, AF=0 CF=0
+ ( 0x55, 0x05 ), # AL=0xf5, AF=0 CF=1
+ ( 0x5b, 0x11 ), # AL=0xf5, AF=1 CF=0
+ ( 0x5b, 0x11 ), # AL=0xf5, AF=1 CF=1
+ ( 0x56, 0x05 ), # AL=0xf6, AF=0 CF=0
+ ( 0x56, 0x05 ), # AL=0xf6, AF=0 CF=1
+ ( 0x5c, 0x15 ), # AL=0xf6, AF=1 CF=0
+ ( 0x5c, 0x15 ), # AL=0xf6, AF=1 CF=1
+ ( 0x57, 0x01 ), # AL=0xf7, AF=0 CF=0
+ ( 0x57, 0x01 ), # AL=0xf7, AF=0 CF=1
+ ( 0x5d, 0x11 ), # AL=0xf7, AF=1 CF=0
+ ( 0x5d, 0x11 ), # AL=0xf7, AF=1 CF=1
+ ( 0x58, 0x01 ), # AL=0xf8, AF=0 CF=0
+ ( 0x58, 0x01 ), # AL=0xf8, AF=0 CF=1
+ ( 0x5e, 0x11 ), # AL=0xf8, AF=1 CF=0
+ ( 0x5e, 0x11 ), # AL=0xf8, AF=1 CF=1
+ ( 0x59, 0x05 ), # AL=0xf9, AF=0 CF=0
+ ( 0x59, 0x05 ), # AL=0xf9, AF=0 CF=1
+ ( 0x5f, 0x15 ), # AL=0xf9, AF=1 CF=0
+ ( 0x5f, 0x15 ), # AL=0xf9, AF=1 CF=1
+ ( 0x60, 0x15 ), # AL=0xfa, AF=0 CF=0
+ ( 0x60, 0x15 ), # AL=0xfa, AF=0 CF=1
+ ( 0x60, 0x15 ), # AL=0xfa, AF=1 CF=0
+ ( 0x60, 0x15 ), # AL=0xfa, AF=1 CF=1
+ ( 0x61, 0x11 ), # AL=0xfb, AF=0 CF=0
+ ( 0x61, 0x11 ), # AL=0xfb, AF=0 CF=1
+ ( 0x61, 0x11 ), # AL=0xfb, AF=1 CF=0
+ ( 0x61, 0x11 ), # AL=0xfb, AF=1 CF=1
+ ( 0x62, 0x11 ), # AL=0xfc, AF=0 CF=0
+ ( 0x62, 0x11 ), # AL=0xfc, AF=0 CF=1
+ ( 0x62, 0x11 ), # AL=0xfc, AF=1 CF=0
+ ( 0x62, 0x11 ), # AL=0xfc, AF=1 CF=1
+ ( 0x63, 0x15 ), # AL=0xfd, AF=0 CF=0
+ ( 0x63, 0x15 ), # AL=0xfd, AF=0 CF=1
+ ( 0x63, 0x15 ), # AL=0xfd, AF=1 CF=0
+ ( 0x63, 0x15 ), # AL=0xfd, AF=1 CF=1
+ ( 0x64, 0x11 ), # AL=0xfe, AF=0 CF=0
+ ( 0x64, 0x11 ), # AL=0xfe, AF=0 CF=1
+ ( 0x64, 0x11 ), # AL=0xfe, AF=1 CF=0
+ ( 0x64, 0x11 ), # AL=0xfe, AF=1 CF=1
+ ( 0x65, 0x15 ), # AL=0xff, AF=0 CF=0
+ ( 0x65, 0x15 ), # AL=0xff, AF=0 CF=1
+ ( 0x65, 0x15 ), # AL=0xff, AF=1 CF=0
+ ( 0x65, 0x15 ), # AL=0xff, AF=1 CF=1
+];
+
diff --git a/src/VBox/VMM/testcase/Instructions/itgTableDas.py b/src/VBox/VMM/testcase/Instructions/itgTableDas.py
new file mode 100644
index 00000000..d3603bdc
--- /dev/null
+++ b/src/VBox/VMM/testcase/Instructions/itgTableDas.py
@@ -0,0 +1,1105 @@
+# -*- coding: utf-8 -*-
+# $Id: itgTableDas.py $
+
+"""
+DAS (instruction) result table.
+"""
+
+
+__copyright__ = \
+"""
+Copyright (C) 2012-2020 Oracle Corporation
+
+This file is part of VirtualBox Open Source Edition (OSE), as
+available from http://www.virtualbox.org. This file is free software;
+you can redistribute it and/or modify it under the terms of the GNU
+General Public License (GPL) as published by the Free Software
+Foundation, in version 2 as it comes in the "COPYING" file of the
+VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+"""
+__version__ = "$Revision: 135976 $";
+
+
+## 32-bit GCC (C99) program that produced g_aItgDasResults below: executes DAS for every (AL, AF, CF) input combination and prints one table row per run.
+g_sItgCProgramDas = \
+"""
+#include <stdio.h>
+
+int main()
+{
+    for (unsigned uInputAL = 0; uInputAL < 256; uInputAL++)
+        for (unsigned fAux = 0; fAux < 2; fAux++)
+            for (unsigned fCarry = 0; fCarry < 2; fCarry++)
+            {
+                unsigned uInputEFlags = fCarry | (fAux << 4);
+                unsigned uResultAL;
+                unsigned uResultEFlags;
+                __asm__ __volatile__("pushl %1\\n"
+                                     "popfl\\n"
+                                     "das\\n"
+                                     "pushf\\n"
+                                     "pop %1\\n"
+                                     : "=a" (uResultAL),
+                                       "=r" (uResultEFlags)
+                                     : "0" (uInputAL),
+                                       "1" (uInputEFlags)
+                                     : "memory"
+                                     );
+                printf("    ( 0x%02x, 0x%02x ), # AL=0x%02x, AF=%u CF=%u\\n",
+                       uResultAL, uResultEFlags & 0xd5, uInputAL, fAux, fCarry);
+                /* 0xd5 = CF, PF, AF, ZF, SF */
+            }
+    return 0;
+}
+""";
+
+
+#
+# Generator mode: invoking "itgTableDas.py gen" compiles the C program above with gcc (-m32) and runs it to (re)print the table on stdout.
+#
+if __name__ == '__main__':
+    import sys;
+    if len(sys.argv) > 1 and sys.argv[1] == 'gen':
+        import subprocess;
+        oProc = subprocess.Popen(['gcc', '-x', 'c', '-std=gnu99', '-m32', '-o', './itgTableDas', '-'], stdin = subprocess.PIPE); # C source arrives on stdin ('-').
+        oProc.communicate(g_sItgCProgramDas); # NOTE(review): passes str; python3 Popen without text mode needs bytes - confirm target python version.
+        oProc.wait();
+        oProc = subprocess.Popen(['./itgTableDas',]).wait(); # NOTE(review): rebinds oProc to the exit code, not a Popen; harmless since it is unused afterwards.
+        sys.exit(0);
+
+
+
+##
+# The DAS results.
+#
+# The index / input relation is: index = (AL << 2) | (AF << 1) | CF
+#
+g_aItgDasResults = \
+[
+ ( 0x00, 0x44 ), # AL=0x00, AF=0 CF=0
+ ( 0xa0, 0x85 ), # AL=0x00, AF=0 CF=1
+ ( 0xfa, 0x95 ), # AL=0x00, AF=1 CF=0
+ ( 0x9a, 0x95 ), # AL=0x00, AF=1 CF=1
+ ( 0x01, 0x00 ), # AL=0x01, AF=0 CF=0
+ ( 0xa1, 0x81 ), # AL=0x01, AF=0 CF=1
+ ( 0xfb, 0x91 ), # AL=0x01, AF=1 CF=0
+ ( 0x9b, 0x91 ), # AL=0x01, AF=1 CF=1
+ ( 0x02, 0x00 ), # AL=0x02, AF=0 CF=0
+ ( 0xa2, 0x81 ), # AL=0x02, AF=0 CF=1
+ ( 0xfc, 0x95 ), # AL=0x02, AF=1 CF=0
+ ( 0x9c, 0x95 ), # AL=0x02, AF=1 CF=1
+ ( 0x03, 0x04 ), # AL=0x03, AF=0 CF=0
+ ( 0xa3, 0x85 ), # AL=0x03, AF=0 CF=1
+ ( 0xfd, 0x91 ), # AL=0x03, AF=1 CF=0
+ ( 0x9d, 0x91 ), # AL=0x03, AF=1 CF=1
+ ( 0x04, 0x00 ), # AL=0x04, AF=0 CF=0
+ ( 0xa4, 0x81 ), # AL=0x04, AF=0 CF=1
+ ( 0xfe, 0x91 ), # AL=0x04, AF=1 CF=0
+ ( 0x9e, 0x91 ), # AL=0x04, AF=1 CF=1
+ ( 0x05, 0x04 ), # AL=0x05, AF=0 CF=0
+ ( 0xa5, 0x85 ), # AL=0x05, AF=0 CF=1
+ ( 0xff, 0x95 ), # AL=0x05, AF=1 CF=0
+ ( 0x9f, 0x95 ), # AL=0x05, AF=1 CF=1
+ ( 0x06, 0x04 ), # AL=0x06, AF=0 CF=0
+ ( 0xa6, 0x85 ), # AL=0x06, AF=0 CF=1
+ ( 0x00, 0x54 ), # AL=0x06, AF=1 CF=0
+ ( 0xa0, 0x95 ), # AL=0x06, AF=1 CF=1
+ ( 0x07, 0x00 ), # AL=0x07, AF=0 CF=0
+ ( 0xa7, 0x81 ), # AL=0x07, AF=0 CF=1
+ ( 0x01, 0x10 ), # AL=0x07, AF=1 CF=0
+ ( 0xa1, 0x91 ), # AL=0x07, AF=1 CF=1
+ ( 0x08, 0x00 ), # AL=0x08, AF=0 CF=0
+ ( 0xa8, 0x81 ), # AL=0x08, AF=0 CF=1
+ ( 0x02, 0x10 ), # AL=0x08, AF=1 CF=0
+ ( 0xa2, 0x91 ), # AL=0x08, AF=1 CF=1
+ ( 0x09, 0x04 ), # AL=0x09, AF=0 CF=0
+ ( 0xa9, 0x85 ), # AL=0x09, AF=0 CF=1
+ ( 0x03, 0x14 ), # AL=0x09, AF=1 CF=0
+ ( 0xa3, 0x95 ), # AL=0x09, AF=1 CF=1
+ ( 0x04, 0x10 ), # AL=0x0a, AF=0 CF=0
+ ( 0xa4, 0x91 ), # AL=0x0a, AF=0 CF=1
+ ( 0x04, 0x10 ), # AL=0x0a, AF=1 CF=0
+ ( 0xa4, 0x91 ), # AL=0x0a, AF=1 CF=1
+ ( 0x05, 0x14 ), # AL=0x0b, AF=0 CF=0
+ ( 0xa5, 0x95 ), # AL=0x0b, AF=0 CF=1
+ ( 0x05, 0x14 ), # AL=0x0b, AF=1 CF=0
+ ( 0xa5, 0x95 ), # AL=0x0b, AF=1 CF=1
+ ( 0x06, 0x14 ), # AL=0x0c, AF=0 CF=0
+ ( 0xa6, 0x95 ), # AL=0x0c, AF=0 CF=1
+ ( 0x06, 0x14 ), # AL=0x0c, AF=1 CF=0
+ ( 0xa6, 0x95 ), # AL=0x0c, AF=1 CF=1
+ ( 0x07, 0x10 ), # AL=0x0d, AF=0 CF=0
+ ( 0xa7, 0x91 ), # AL=0x0d, AF=0 CF=1
+ ( 0x07, 0x10 ), # AL=0x0d, AF=1 CF=0
+ ( 0xa7, 0x91 ), # AL=0x0d, AF=1 CF=1
+ ( 0x08, 0x10 ), # AL=0x0e, AF=0 CF=0
+ ( 0xa8, 0x91 ), # AL=0x0e, AF=0 CF=1
+ ( 0x08, 0x10 ), # AL=0x0e, AF=1 CF=0
+ ( 0xa8, 0x91 ), # AL=0x0e, AF=1 CF=1
+ ( 0x09, 0x14 ), # AL=0x0f, AF=0 CF=0
+ ( 0xa9, 0x95 ), # AL=0x0f, AF=0 CF=1
+ ( 0x09, 0x14 ), # AL=0x0f, AF=1 CF=0
+ ( 0xa9, 0x95 ), # AL=0x0f, AF=1 CF=1
+ ( 0x10, 0x00 ), # AL=0x10, AF=0 CF=0
+ ( 0xb0, 0x81 ), # AL=0x10, AF=0 CF=1
+ ( 0x0a, 0x14 ), # AL=0x10, AF=1 CF=0
+ ( 0xaa, 0x95 ), # AL=0x10, AF=1 CF=1
+ ( 0x11, 0x04 ), # AL=0x11, AF=0 CF=0
+ ( 0xb1, 0x85 ), # AL=0x11, AF=0 CF=1
+ ( 0x0b, 0x10 ), # AL=0x11, AF=1 CF=0
+ ( 0xab, 0x91 ), # AL=0x11, AF=1 CF=1
+ ( 0x12, 0x04 ), # AL=0x12, AF=0 CF=0
+ ( 0xb2, 0x85 ), # AL=0x12, AF=0 CF=1
+ ( 0x0c, 0x14 ), # AL=0x12, AF=1 CF=0
+ ( 0xac, 0x95 ), # AL=0x12, AF=1 CF=1
+ ( 0x13, 0x00 ), # AL=0x13, AF=0 CF=0
+ ( 0xb3, 0x81 ), # AL=0x13, AF=0 CF=1
+ ( 0x0d, 0x10 ), # AL=0x13, AF=1 CF=0
+ ( 0xad, 0x91 ), # AL=0x13, AF=1 CF=1
+ ( 0x14, 0x04 ), # AL=0x14, AF=0 CF=0
+ ( 0xb4, 0x85 ), # AL=0x14, AF=0 CF=1
+ ( 0x0e, 0x10 ), # AL=0x14, AF=1 CF=0
+ ( 0xae, 0x91 ), # AL=0x14, AF=1 CF=1
+ ( 0x15, 0x00 ), # AL=0x15, AF=0 CF=0
+ ( 0xb5, 0x81 ), # AL=0x15, AF=0 CF=1
+ ( 0x0f, 0x14 ), # AL=0x15, AF=1 CF=0
+ ( 0xaf, 0x95 ), # AL=0x15, AF=1 CF=1
+ ( 0x16, 0x00 ), # AL=0x16, AF=0 CF=0
+ ( 0xb6, 0x81 ), # AL=0x16, AF=0 CF=1
+ ( 0x10, 0x10 ), # AL=0x16, AF=1 CF=0
+ ( 0xb0, 0x91 ), # AL=0x16, AF=1 CF=1
+ ( 0x17, 0x04 ), # AL=0x17, AF=0 CF=0
+ ( 0xb7, 0x85 ), # AL=0x17, AF=0 CF=1
+ ( 0x11, 0x14 ), # AL=0x17, AF=1 CF=0
+ ( 0xb1, 0x95 ), # AL=0x17, AF=1 CF=1
+ ( 0x18, 0x04 ), # AL=0x18, AF=0 CF=0
+ ( 0xb8, 0x85 ), # AL=0x18, AF=0 CF=1
+ ( 0x12, 0x14 ), # AL=0x18, AF=1 CF=0
+ ( 0xb2, 0x95 ), # AL=0x18, AF=1 CF=1
+ ( 0x19, 0x00 ), # AL=0x19, AF=0 CF=0
+ ( 0xb9, 0x81 ), # AL=0x19, AF=0 CF=1
+ ( 0x13, 0x10 ), # AL=0x19, AF=1 CF=0
+ ( 0xb3, 0x91 ), # AL=0x19, AF=1 CF=1
+ ( 0x14, 0x14 ), # AL=0x1a, AF=0 CF=0
+ ( 0xb4, 0x95 ), # AL=0x1a, AF=0 CF=1
+ ( 0x14, 0x14 ), # AL=0x1a, AF=1 CF=0
+ ( 0xb4, 0x95 ), # AL=0x1a, AF=1 CF=1
+ ( 0x15, 0x10 ), # AL=0x1b, AF=0 CF=0
+ ( 0xb5, 0x91 ), # AL=0x1b, AF=0 CF=1
+ ( 0x15, 0x10 ), # AL=0x1b, AF=1 CF=0
+ ( 0xb5, 0x91 ), # AL=0x1b, AF=1 CF=1
+ ( 0x16, 0x10 ), # AL=0x1c, AF=0 CF=0
+ ( 0xb6, 0x91 ), # AL=0x1c, AF=0 CF=1
+ ( 0x16, 0x10 ), # AL=0x1c, AF=1 CF=0
+ ( 0xb6, 0x91 ), # AL=0x1c, AF=1 CF=1
+ ( 0x17, 0x14 ), # AL=0x1d, AF=0 CF=0
+ ( 0xb7, 0x95 ), # AL=0x1d, AF=0 CF=1
+ ( 0x17, 0x14 ), # AL=0x1d, AF=1 CF=0
+ ( 0xb7, 0x95 ), # AL=0x1d, AF=1 CF=1
+ ( 0x18, 0x14 ), # AL=0x1e, AF=0 CF=0
+ ( 0xb8, 0x95 ), # AL=0x1e, AF=0 CF=1
+ ( 0x18, 0x14 ), # AL=0x1e, AF=1 CF=0
+ ( 0xb8, 0x95 ), # AL=0x1e, AF=1 CF=1
+ ( 0x19, 0x10 ), # AL=0x1f, AF=0 CF=0
+ ( 0xb9, 0x91 ), # AL=0x1f, AF=0 CF=1
+ ( 0x19, 0x10 ), # AL=0x1f, AF=1 CF=0
+ ( 0xb9, 0x91 ), # AL=0x1f, AF=1 CF=1
+ ( 0x20, 0x00 ), # AL=0x20, AF=0 CF=0
+ ( 0xc0, 0x85 ), # AL=0x20, AF=0 CF=1
+ ( 0x1a, 0x10 ), # AL=0x20, AF=1 CF=0
+ ( 0xba, 0x91 ), # AL=0x20, AF=1 CF=1
+ ( 0x21, 0x04 ), # AL=0x21, AF=0 CF=0
+ ( 0xc1, 0x81 ), # AL=0x21, AF=0 CF=1
+ ( 0x1b, 0x14 ), # AL=0x21, AF=1 CF=0
+ ( 0xbb, 0x95 ), # AL=0x21, AF=1 CF=1
+ ( 0x22, 0x04 ), # AL=0x22, AF=0 CF=0
+ ( 0xc2, 0x81 ), # AL=0x22, AF=0 CF=1
+ ( 0x1c, 0x10 ), # AL=0x22, AF=1 CF=0
+ ( 0xbc, 0x91 ), # AL=0x22, AF=1 CF=1
+ ( 0x23, 0x00 ), # AL=0x23, AF=0 CF=0
+ ( 0xc3, 0x85 ), # AL=0x23, AF=0 CF=1
+ ( 0x1d, 0x14 ), # AL=0x23, AF=1 CF=0
+ ( 0xbd, 0x95 ), # AL=0x23, AF=1 CF=1
+ ( 0x24, 0x04 ), # AL=0x24, AF=0 CF=0
+ ( 0xc4, 0x81 ), # AL=0x24, AF=0 CF=1
+ ( 0x1e, 0x14 ), # AL=0x24, AF=1 CF=0
+ ( 0xbe, 0x95 ), # AL=0x24, AF=1 CF=1
+ ( 0x25, 0x00 ), # AL=0x25, AF=0 CF=0
+ ( 0xc5, 0x85 ), # AL=0x25, AF=0 CF=1
+ ( 0x1f, 0x10 ), # AL=0x25, AF=1 CF=0
+ ( 0xbf, 0x91 ), # AL=0x25, AF=1 CF=1
+ ( 0x26, 0x00 ), # AL=0x26, AF=0 CF=0
+ ( 0xc6, 0x85 ), # AL=0x26, AF=0 CF=1
+ ( 0x20, 0x10 ), # AL=0x26, AF=1 CF=0
+ ( 0xc0, 0x95 ), # AL=0x26, AF=1 CF=1
+ ( 0x27, 0x04 ), # AL=0x27, AF=0 CF=0
+ ( 0xc7, 0x81 ), # AL=0x27, AF=0 CF=1
+ ( 0x21, 0x14 ), # AL=0x27, AF=1 CF=0
+ ( 0xc1, 0x91 ), # AL=0x27, AF=1 CF=1
+ ( 0x28, 0x04 ), # AL=0x28, AF=0 CF=0
+ ( 0xc8, 0x81 ), # AL=0x28, AF=0 CF=1
+ ( 0x22, 0x14 ), # AL=0x28, AF=1 CF=0
+ ( 0xc2, 0x91 ), # AL=0x28, AF=1 CF=1
+ ( 0x29, 0x00 ), # AL=0x29, AF=0 CF=0
+ ( 0xc9, 0x85 ), # AL=0x29, AF=0 CF=1
+ ( 0x23, 0x10 ), # AL=0x29, AF=1 CF=0
+ ( 0xc3, 0x95 ), # AL=0x29, AF=1 CF=1
+ ( 0x24, 0x14 ), # AL=0x2a, AF=0 CF=0
+ ( 0xc4, 0x91 ), # AL=0x2a, AF=0 CF=1
+ ( 0x24, 0x14 ), # AL=0x2a, AF=1 CF=0
+ ( 0xc4, 0x91 ), # AL=0x2a, AF=1 CF=1
+ ( 0x25, 0x10 ), # AL=0x2b, AF=0 CF=0
+ ( 0xc5, 0x95 ), # AL=0x2b, AF=0 CF=1
+ ( 0x25, 0x10 ), # AL=0x2b, AF=1 CF=0
+ ( 0xc5, 0x95 ), # AL=0x2b, AF=1 CF=1
+ ( 0x26, 0x10 ), # AL=0x2c, AF=0 CF=0
+ ( 0xc6, 0x95 ), # AL=0x2c, AF=0 CF=1
+ ( 0x26, 0x10 ), # AL=0x2c, AF=1 CF=0
+ ( 0xc6, 0x95 ), # AL=0x2c, AF=1 CF=1
+ ( 0x27, 0x14 ), # AL=0x2d, AF=0 CF=0
+ ( 0xc7, 0x91 ), # AL=0x2d, AF=0 CF=1
+ ( 0x27, 0x14 ), # AL=0x2d, AF=1 CF=0
+ ( 0xc7, 0x91 ), # AL=0x2d, AF=1 CF=1
+ ( 0x28, 0x14 ), # AL=0x2e, AF=0 CF=0
+ ( 0xc8, 0x91 ), # AL=0x2e, AF=0 CF=1
+ ( 0x28, 0x14 ), # AL=0x2e, AF=1 CF=0
+ ( 0xc8, 0x91 ), # AL=0x2e, AF=1 CF=1
+ ( 0x29, 0x10 ), # AL=0x2f, AF=0 CF=0
+ ( 0xc9, 0x95 ), # AL=0x2f, AF=0 CF=1
+ ( 0x29, 0x10 ), # AL=0x2f, AF=1 CF=0
+ ( 0xc9, 0x95 ), # AL=0x2f, AF=1 CF=1
+ ( 0x30, 0x04 ), # AL=0x30, AF=0 CF=0
+ ( 0xd0, 0x81 ), # AL=0x30, AF=0 CF=1
+ ( 0x2a, 0x10 ), # AL=0x30, AF=1 CF=0
+ ( 0xca, 0x95 ), # AL=0x30, AF=1 CF=1
+ ( 0x31, 0x00 ), # AL=0x31, AF=0 CF=0
+ ( 0xd1, 0x85 ), # AL=0x31, AF=0 CF=1
+ ( 0x2b, 0x14 ), # AL=0x31, AF=1 CF=0
+ ( 0xcb, 0x91 ), # AL=0x31, AF=1 CF=1
+ ( 0x32, 0x00 ), # AL=0x32, AF=0 CF=0
+ ( 0xd2, 0x85 ), # AL=0x32, AF=0 CF=1
+ ( 0x2c, 0x10 ), # AL=0x32, AF=1 CF=0
+ ( 0xcc, 0x95 ), # AL=0x32, AF=1 CF=1
+ ( 0x33, 0x04 ), # AL=0x33, AF=0 CF=0
+ ( 0xd3, 0x81 ), # AL=0x33, AF=0 CF=1
+ ( 0x2d, 0x14 ), # AL=0x33, AF=1 CF=0
+ ( 0xcd, 0x91 ), # AL=0x33, AF=1 CF=1
+ ( 0x34, 0x00 ), # AL=0x34, AF=0 CF=0
+ ( 0xd4, 0x85 ), # AL=0x34, AF=0 CF=1
+ ( 0x2e, 0x14 ), # AL=0x34, AF=1 CF=0
+ ( 0xce, 0x91 ), # AL=0x34, AF=1 CF=1
+ ( 0x35, 0x04 ), # AL=0x35, AF=0 CF=0
+ ( 0xd5, 0x81 ), # AL=0x35, AF=0 CF=1
+ ( 0x2f, 0x10 ), # AL=0x35, AF=1 CF=0
+ ( 0xcf, 0x95 ), # AL=0x35, AF=1 CF=1
+ ( 0x36, 0x04 ), # AL=0x36, AF=0 CF=0
+ ( 0xd6, 0x81 ), # AL=0x36, AF=0 CF=1
+ ( 0x30, 0x14 ), # AL=0x36, AF=1 CF=0
+ ( 0xd0, 0x91 ), # AL=0x36, AF=1 CF=1
+ ( 0x37, 0x00 ), # AL=0x37, AF=0 CF=0
+ ( 0xd7, 0x85 ), # AL=0x37, AF=0 CF=1
+ ( 0x31, 0x10 ), # AL=0x37, AF=1 CF=0
+ ( 0xd1, 0x95 ), # AL=0x37, AF=1 CF=1
+ ( 0x38, 0x00 ), # AL=0x38, AF=0 CF=0
+ ( 0xd8, 0x85 ), # AL=0x38, AF=0 CF=1
+ ( 0x32, 0x10 ), # AL=0x38, AF=1 CF=0
+ ( 0xd2, 0x95 ), # AL=0x38, AF=1 CF=1
+ ( 0x39, 0x04 ), # AL=0x39, AF=0 CF=0
+ ( 0xd9, 0x81 ), # AL=0x39, AF=0 CF=1
+ ( 0x33, 0x14 ), # AL=0x39, AF=1 CF=0
+ ( 0xd3, 0x91 ), # AL=0x39, AF=1 CF=1
+ ( 0x34, 0x10 ), # AL=0x3a, AF=0 CF=0
+ ( 0xd4, 0x95 ), # AL=0x3a, AF=0 CF=1
+ ( 0x34, 0x10 ), # AL=0x3a, AF=1 CF=0
+ ( 0xd4, 0x95 ), # AL=0x3a, AF=1 CF=1
+ ( 0x35, 0x14 ), # AL=0x3b, AF=0 CF=0
+ ( 0xd5, 0x91 ), # AL=0x3b, AF=0 CF=1
+ ( 0x35, 0x14 ), # AL=0x3b, AF=1 CF=0
+ ( 0xd5, 0x91 ), # AL=0x3b, AF=1 CF=1
+ ( 0x36, 0x14 ), # AL=0x3c, AF=0 CF=0
+ ( 0xd6, 0x91 ), # AL=0x3c, AF=0 CF=1
+ ( 0x36, 0x14 ), # AL=0x3c, AF=1 CF=0
+ ( 0xd6, 0x91 ), # AL=0x3c, AF=1 CF=1
+ ( 0x37, 0x10 ), # AL=0x3d, AF=0 CF=0
+ ( 0xd7, 0x95 ), # AL=0x3d, AF=0 CF=1
+ ( 0x37, 0x10 ), # AL=0x3d, AF=1 CF=0
+ ( 0xd7, 0x95 ), # AL=0x3d, AF=1 CF=1
+ ( 0x38, 0x10 ), # AL=0x3e, AF=0 CF=0
+ ( 0xd8, 0x95 ), # AL=0x3e, AF=0 CF=1
+ ( 0x38, 0x10 ), # AL=0x3e, AF=1 CF=0
+ ( 0xd8, 0x95 ), # AL=0x3e, AF=1 CF=1
+ ( 0x39, 0x14 ), # AL=0x3f, AF=0 CF=0
+ ( 0xd9, 0x91 ), # AL=0x3f, AF=0 CF=1
+ ( 0x39, 0x14 ), # AL=0x3f, AF=1 CF=0
+ ( 0xd9, 0x91 ), # AL=0x3f, AF=1 CF=1
+ ( 0x40, 0x00 ), # AL=0x40, AF=0 CF=0
+ ( 0xe0, 0x81 ), # AL=0x40, AF=0 CF=1
+ ( 0x3a, 0x14 ), # AL=0x40, AF=1 CF=0
+ ( 0xda, 0x91 ), # AL=0x40, AF=1 CF=1
+ ( 0x41, 0x04 ), # AL=0x41, AF=0 CF=0
+ ( 0xe1, 0x85 ), # AL=0x41, AF=0 CF=1
+ ( 0x3b, 0x10 ), # AL=0x41, AF=1 CF=0
+ ( 0xdb, 0x95 ), # AL=0x41, AF=1 CF=1
+ ( 0x42, 0x04 ), # AL=0x42, AF=0 CF=0
+ ( 0xe2, 0x85 ), # AL=0x42, AF=0 CF=1
+ ( 0x3c, 0x14 ), # AL=0x42, AF=1 CF=0
+ ( 0xdc, 0x91 ), # AL=0x42, AF=1 CF=1
+ ( 0x43, 0x00 ), # AL=0x43, AF=0 CF=0
+ ( 0xe3, 0x81 ), # AL=0x43, AF=0 CF=1
+ ( 0x3d, 0x10 ), # AL=0x43, AF=1 CF=0
+ ( 0xdd, 0x95 ), # AL=0x43, AF=1 CF=1
+ ( 0x44, 0x04 ), # AL=0x44, AF=0 CF=0
+ ( 0xe4, 0x85 ), # AL=0x44, AF=0 CF=1
+ ( 0x3e, 0x10 ), # AL=0x44, AF=1 CF=0
+ ( 0xde, 0x95 ), # AL=0x44, AF=1 CF=1
+ ( 0x45, 0x00 ), # AL=0x45, AF=0 CF=0
+ ( 0xe5, 0x81 ), # AL=0x45, AF=0 CF=1
+ ( 0x3f, 0x14 ), # AL=0x45, AF=1 CF=0
+ ( 0xdf, 0x91 ), # AL=0x45, AF=1 CF=1
+ ( 0x46, 0x00 ), # AL=0x46, AF=0 CF=0
+ ( 0xe6, 0x81 ), # AL=0x46, AF=0 CF=1
+ ( 0x40, 0x10 ), # AL=0x46, AF=1 CF=0
+ ( 0xe0, 0x91 ), # AL=0x46, AF=1 CF=1
+ ( 0x47, 0x04 ), # AL=0x47, AF=0 CF=0
+ ( 0xe7, 0x85 ), # AL=0x47, AF=0 CF=1
+ ( 0x41, 0x14 ), # AL=0x47, AF=1 CF=0
+ ( 0xe1, 0x95 ), # AL=0x47, AF=1 CF=1
+ ( 0x48, 0x04 ), # AL=0x48, AF=0 CF=0
+ ( 0xe8, 0x85 ), # AL=0x48, AF=0 CF=1
+ ( 0x42, 0x14 ), # AL=0x48, AF=1 CF=0
+ ( 0xe2, 0x95 ), # AL=0x48, AF=1 CF=1
+ ( 0x49, 0x00 ), # AL=0x49, AF=0 CF=0
+ ( 0xe9, 0x81 ), # AL=0x49, AF=0 CF=1
+ ( 0x43, 0x10 ), # AL=0x49, AF=1 CF=0
+ ( 0xe3, 0x91 ), # AL=0x49, AF=1 CF=1
+ ( 0x44, 0x14 ), # AL=0x4a, AF=0 CF=0
+ ( 0xe4, 0x95 ), # AL=0x4a, AF=0 CF=1
+ ( 0x44, 0x14 ), # AL=0x4a, AF=1 CF=0
+ ( 0xe4, 0x95 ), # AL=0x4a, AF=1 CF=1
+ ( 0x45, 0x10 ), # AL=0x4b, AF=0 CF=0
+ ( 0xe5, 0x91 ), # AL=0x4b, AF=0 CF=1
+ ( 0x45, 0x10 ), # AL=0x4b, AF=1 CF=0
+ ( 0xe5, 0x91 ), # AL=0x4b, AF=1 CF=1
+ ( 0x46, 0x10 ), # AL=0x4c, AF=0 CF=0
+ ( 0xe6, 0x91 ), # AL=0x4c, AF=0 CF=1
+ ( 0x46, 0x10 ), # AL=0x4c, AF=1 CF=0
+ ( 0xe6, 0x91 ), # AL=0x4c, AF=1 CF=1
+ ( 0x47, 0x14 ), # AL=0x4d, AF=0 CF=0
+ ( 0xe7, 0x95 ), # AL=0x4d, AF=0 CF=1
+ ( 0x47, 0x14 ), # AL=0x4d, AF=1 CF=0
+ ( 0xe7, 0x95 ), # AL=0x4d, AF=1 CF=1
+ ( 0x48, 0x14 ), # AL=0x4e, AF=0 CF=0
+ ( 0xe8, 0x95 ), # AL=0x4e, AF=0 CF=1
+ ( 0x48, 0x14 ), # AL=0x4e, AF=1 CF=0
+ ( 0xe8, 0x95 ), # AL=0x4e, AF=1 CF=1
+ ( 0x49, 0x10 ), # AL=0x4f, AF=0 CF=0
+ ( 0xe9, 0x91 ), # AL=0x4f, AF=0 CF=1
+ ( 0x49, 0x10 ), # AL=0x4f, AF=1 CF=0
+ ( 0xe9, 0x91 ), # AL=0x4f, AF=1 CF=1
+ ( 0x50, 0x04 ), # AL=0x50, AF=0 CF=0
+ ( 0xf0, 0x85 ), # AL=0x50, AF=0 CF=1
+ ( 0x4a, 0x10 ), # AL=0x50, AF=1 CF=0
+ ( 0xea, 0x91 ), # AL=0x50, AF=1 CF=1
+ ( 0x51, 0x00 ), # AL=0x51, AF=0 CF=0
+ ( 0xf1, 0x81 ), # AL=0x51, AF=0 CF=1
+ ( 0x4b, 0x14 ), # AL=0x51, AF=1 CF=0
+ ( 0xeb, 0x95 ), # AL=0x51, AF=1 CF=1
+ ( 0x52, 0x00 ), # AL=0x52, AF=0 CF=0
+ ( 0xf2, 0x81 ), # AL=0x52, AF=0 CF=1
+ ( 0x4c, 0x10 ), # AL=0x52, AF=1 CF=0
+ ( 0xec, 0x91 ), # AL=0x52, AF=1 CF=1
+ ( 0x53, 0x04 ), # AL=0x53, AF=0 CF=0
+ ( 0xf3, 0x85 ), # AL=0x53, AF=0 CF=1
+ ( 0x4d, 0x14 ), # AL=0x53, AF=1 CF=0
+ ( 0xed, 0x95 ), # AL=0x53, AF=1 CF=1
+ ( 0x54, 0x00 ), # AL=0x54, AF=0 CF=0
+ ( 0xf4, 0x81 ), # AL=0x54, AF=0 CF=1
+ ( 0x4e, 0x14 ), # AL=0x54, AF=1 CF=0
+ ( 0xee, 0x95 ), # AL=0x54, AF=1 CF=1
+ ( 0x55, 0x04 ), # AL=0x55, AF=0 CF=0
+ ( 0xf5, 0x85 ), # AL=0x55, AF=0 CF=1
+ ( 0x4f, 0x10 ), # AL=0x55, AF=1 CF=0
+ ( 0xef, 0x91 ), # AL=0x55, AF=1 CF=1
+ ( 0x56, 0x04 ), # AL=0x56, AF=0 CF=0
+ ( 0xf6, 0x85 ), # AL=0x56, AF=0 CF=1
+ ( 0x50, 0x14 ), # AL=0x56, AF=1 CF=0
+ ( 0xf0, 0x95 ), # AL=0x56, AF=1 CF=1
+ ( 0x57, 0x00 ), # AL=0x57, AF=0 CF=0
+ ( 0xf7, 0x81 ), # AL=0x57, AF=0 CF=1
+ ( 0x51, 0x10 ), # AL=0x57, AF=1 CF=0
+ ( 0xf1, 0x91 ), # AL=0x57, AF=1 CF=1
+ ( 0x58, 0x00 ), # AL=0x58, AF=0 CF=0
+ ( 0xf8, 0x81 ), # AL=0x58, AF=0 CF=1
+ ( 0x52, 0x10 ), # AL=0x58, AF=1 CF=0
+ ( 0xf2, 0x91 ), # AL=0x58, AF=1 CF=1
+ ( 0x59, 0x04 ), # AL=0x59, AF=0 CF=0
+ ( 0xf9, 0x85 ), # AL=0x59, AF=0 CF=1
+ ( 0x53, 0x14 ), # AL=0x59, AF=1 CF=0
+ ( 0xf3, 0x95 ), # AL=0x59, AF=1 CF=1
+ ( 0x54, 0x10 ), # AL=0x5a, AF=0 CF=0
+ ( 0xf4, 0x91 ), # AL=0x5a, AF=0 CF=1
+ ( 0x54, 0x10 ), # AL=0x5a, AF=1 CF=0
+ ( 0xf4, 0x91 ), # AL=0x5a, AF=1 CF=1
+ ( 0x55, 0x14 ), # AL=0x5b, AF=0 CF=0
+ ( 0xf5, 0x95 ), # AL=0x5b, AF=0 CF=1
+ ( 0x55, 0x14 ), # AL=0x5b, AF=1 CF=0
+ ( 0xf5, 0x95 ), # AL=0x5b, AF=1 CF=1
+ ( 0x56, 0x14 ), # AL=0x5c, AF=0 CF=0
+ ( 0xf6, 0x95 ), # AL=0x5c, AF=0 CF=1
+ ( 0x56, 0x14 ), # AL=0x5c, AF=1 CF=0
+ ( 0xf6, 0x95 ), # AL=0x5c, AF=1 CF=1
+ ( 0x57, 0x10 ), # AL=0x5d, AF=0 CF=0
+ ( 0xf7, 0x91 ), # AL=0x5d, AF=0 CF=1
+ ( 0x57, 0x10 ), # AL=0x5d, AF=1 CF=0
+ ( 0xf7, 0x91 ), # AL=0x5d, AF=1 CF=1
+ ( 0x58, 0x10 ), # AL=0x5e, AF=0 CF=0
+ ( 0xf8, 0x91 ), # AL=0x5e, AF=0 CF=1
+ ( 0x58, 0x10 ), # AL=0x5e, AF=1 CF=0
+ ( 0xf8, 0x91 ), # AL=0x5e, AF=1 CF=1
+ ( 0x59, 0x14 ), # AL=0x5f, AF=0 CF=0
+ ( 0xf9, 0x95 ), # AL=0x5f, AF=0 CF=1
+ ( 0x59, 0x14 ), # AL=0x5f, AF=1 CF=0
+ ( 0xf9, 0x95 ), # AL=0x5f, AF=1 CF=1
+ ( 0x60, 0x04 ), # AL=0x60, AF=0 CF=0
+ ( 0x00, 0x45 ), # AL=0x60, AF=0 CF=1
+ ( 0x5a, 0x14 ), # AL=0x60, AF=1 CF=0
+ ( 0xfa, 0x95 ), # AL=0x60, AF=1 CF=1
+ ( 0x61, 0x00 ), # AL=0x61, AF=0 CF=0
+ ( 0x01, 0x01 ), # AL=0x61, AF=0 CF=1
+ ( 0x5b, 0x10 ), # AL=0x61, AF=1 CF=0
+ ( 0xfb, 0x91 ), # AL=0x61, AF=1 CF=1
+ ( 0x62, 0x00 ), # AL=0x62, AF=0 CF=0
+ ( 0x02, 0x01 ), # AL=0x62, AF=0 CF=1
+ ( 0x5c, 0x14 ), # AL=0x62, AF=1 CF=0
+ ( 0xfc, 0x95 ), # AL=0x62, AF=1 CF=1
+ ( 0x63, 0x04 ), # AL=0x63, AF=0 CF=0
+ ( 0x03, 0x05 ), # AL=0x63, AF=0 CF=1
+ ( 0x5d, 0x10 ), # AL=0x63, AF=1 CF=0
+ ( 0xfd, 0x91 ), # AL=0x63, AF=1 CF=1
+ ( 0x64, 0x00 ), # AL=0x64, AF=0 CF=0
+ ( 0x04, 0x01 ), # AL=0x64, AF=0 CF=1
+ ( 0x5e, 0x10 ), # AL=0x64, AF=1 CF=0
+ ( 0xfe, 0x91 ), # AL=0x64, AF=1 CF=1
+ ( 0x65, 0x04 ), # AL=0x65, AF=0 CF=0
+ ( 0x05, 0x05 ), # AL=0x65, AF=0 CF=1
+ ( 0x5f, 0x14 ), # AL=0x65, AF=1 CF=0
+ ( 0xff, 0x95 ), # AL=0x65, AF=1 CF=1
+ ( 0x66, 0x04 ), # AL=0x66, AF=0 CF=0
+ ( 0x06, 0x05 ), # AL=0x66, AF=0 CF=1
+ ( 0x60, 0x14 ), # AL=0x66, AF=1 CF=0
+ ( 0x00, 0x55 ), # AL=0x66, AF=1 CF=1
+ ( 0x67, 0x00 ), # AL=0x67, AF=0 CF=0
+ ( 0x07, 0x01 ), # AL=0x67, AF=0 CF=1
+ ( 0x61, 0x10 ), # AL=0x67, AF=1 CF=0
+ ( 0x01, 0x11 ), # AL=0x67, AF=1 CF=1
+ ( 0x68, 0x00 ), # AL=0x68, AF=0 CF=0
+ ( 0x08, 0x01 ), # AL=0x68, AF=0 CF=1
+ ( 0x62, 0x10 ), # AL=0x68, AF=1 CF=0
+ ( 0x02, 0x11 ), # AL=0x68, AF=1 CF=1
+ ( 0x69, 0x04 ), # AL=0x69, AF=0 CF=0
+ ( 0x09, 0x05 ), # AL=0x69, AF=0 CF=1
+ ( 0x63, 0x14 ), # AL=0x69, AF=1 CF=0
+ ( 0x03, 0x15 ), # AL=0x69, AF=1 CF=1
+ ( 0x64, 0x10 ), # AL=0x6a, AF=0 CF=0
+ ( 0x04, 0x11 ), # AL=0x6a, AF=0 CF=1
+ ( 0x64, 0x10 ), # AL=0x6a, AF=1 CF=0
+ ( 0x04, 0x11 ), # AL=0x6a, AF=1 CF=1
+ ( 0x65, 0x14 ), # AL=0x6b, AF=0 CF=0
+ ( 0x05, 0x15 ), # AL=0x6b, AF=0 CF=1
+ ( 0x65, 0x14 ), # AL=0x6b, AF=1 CF=0
+ ( 0x05, 0x15 ), # AL=0x6b, AF=1 CF=1
+ ( 0x66, 0x14 ), # AL=0x6c, AF=0 CF=0
+ ( 0x06, 0x15 ), # AL=0x6c, AF=0 CF=1
+ ( 0x66, 0x14 ), # AL=0x6c, AF=1 CF=0
+ ( 0x06, 0x15 ), # AL=0x6c, AF=1 CF=1
+ ( 0x67, 0x10 ), # AL=0x6d, AF=0 CF=0
+ ( 0x07, 0x11 ), # AL=0x6d, AF=0 CF=1
+ ( 0x67, 0x10 ), # AL=0x6d, AF=1 CF=0
+ ( 0x07, 0x11 ), # AL=0x6d, AF=1 CF=1
+ ( 0x68, 0x10 ), # AL=0x6e, AF=0 CF=0
+ ( 0x08, 0x11 ), # AL=0x6e, AF=0 CF=1
+ ( 0x68, 0x10 ), # AL=0x6e, AF=1 CF=0
+ ( 0x08, 0x11 ), # AL=0x6e, AF=1 CF=1
+ ( 0x69, 0x14 ), # AL=0x6f, AF=0 CF=0
+ ( 0x09, 0x15 ), # AL=0x6f, AF=0 CF=1
+ ( 0x69, 0x14 ), # AL=0x6f, AF=1 CF=0
+ ( 0x09, 0x15 ), # AL=0x6f, AF=1 CF=1
+ ( 0x70, 0x00 ), # AL=0x70, AF=0 CF=0
+ ( 0x10, 0x01 ), # AL=0x70, AF=0 CF=1
+ ( 0x6a, 0x14 ), # AL=0x70, AF=1 CF=0
+ ( 0x0a, 0x15 ), # AL=0x70, AF=1 CF=1
+ ( 0x71, 0x04 ), # AL=0x71, AF=0 CF=0
+ ( 0x11, 0x05 ), # AL=0x71, AF=0 CF=1
+ ( 0x6b, 0x10 ), # AL=0x71, AF=1 CF=0
+ ( 0x0b, 0x11 ), # AL=0x71, AF=1 CF=1
+ ( 0x72, 0x04 ), # AL=0x72, AF=0 CF=0
+ ( 0x12, 0x05 ), # AL=0x72, AF=0 CF=1
+ ( 0x6c, 0x14 ), # AL=0x72, AF=1 CF=0
+ ( 0x0c, 0x15 ), # AL=0x72, AF=1 CF=1
+ ( 0x73, 0x00 ), # AL=0x73, AF=0 CF=0
+ ( 0x13, 0x01 ), # AL=0x73, AF=0 CF=1
+ ( 0x6d, 0x10 ), # AL=0x73, AF=1 CF=0
+ ( 0x0d, 0x11 ), # AL=0x73, AF=1 CF=1
+ ( 0x74, 0x04 ), # AL=0x74, AF=0 CF=0
+ ( 0x14, 0x05 ), # AL=0x74, AF=0 CF=1
+ ( 0x6e, 0x10 ), # AL=0x74, AF=1 CF=0
+ ( 0x0e, 0x11 ), # AL=0x74, AF=1 CF=1
+ ( 0x75, 0x00 ), # AL=0x75, AF=0 CF=0
+ ( 0x15, 0x01 ), # AL=0x75, AF=0 CF=1
+ ( 0x6f, 0x14 ), # AL=0x75, AF=1 CF=0
+ ( 0x0f, 0x15 ), # AL=0x75, AF=1 CF=1
+ ( 0x76, 0x00 ), # AL=0x76, AF=0 CF=0
+ ( 0x16, 0x01 ), # AL=0x76, AF=0 CF=1
+ ( 0x70, 0x10 ), # AL=0x76, AF=1 CF=0
+ ( 0x10, 0x11 ), # AL=0x76, AF=1 CF=1
+ ( 0x77, 0x04 ), # AL=0x77, AF=0 CF=0
+ ( 0x17, 0x05 ), # AL=0x77, AF=0 CF=1
+ ( 0x71, 0x14 ), # AL=0x77, AF=1 CF=0
+ ( 0x11, 0x15 ), # AL=0x77, AF=1 CF=1
+ ( 0x78, 0x04 ), # AL=0x78, AF=0 CF=0
+ ( 0x18, 0x05 ), # AL=0x78, AF=0 CF=1
+ ( 0x72, 0x14 ), # AL=0x78, AF=1 CF=0
+ ( 0x12, 0x15 ), # AL=0x78, AF=1 CF=1
+ ( 0x79, 0x00 ), # AL=0x79, AF=0 CF=0
+ ( 0x19, 0x01 ), # AL=0x79, AF=0 CF=1
+ ( 0x73, 0x10 ), # AL=0x79, AF=1 CF=0
+ ( 0x13, 0x11 ), # AL=0x79, AF=1 CF=1
+ ( 0x74, 0x14 ), # AL=0x7a, AF=0 CF=0
+ ( 0x14, 0x15 ), # AL=0x7a, AF=0 CF=1
+ ( 0x74, 0x14 ), # AL=0x7a, AF=1 CF=0
+ ( 0x14, 0x15 ), # AL=0x7a, AF=1 CF=1
+ ( 0x75, 0x10 ), # AL=0x7b, AF=0 CF=0
+ ( 0x15, 0x11 ), # AL=0x7b, AF=0 CF=1
+ ( 0x75, 0x10 ), # AL=0x7b, AF=1 CF=0
+ ( 0x15, 0x11 ), # AL=0x7b, AF=1 CF=1
+ ( 0x76, 0x10 ), # AL=0x7c, AF=0 CF=0
+ ( 0x16, 0x11 ), # AL=0x7c, AF=0 CF=1
+ ( 0x76, 0x10 ), # AL=0x7c, AF=1 CF=0
+ ( 0x16, 0x11 ), # AL=0x7c, AF=1 CF=1
+ ( 0x77, 0x14 ), # AL=0x7d, AF=0 CF=0
+ ( 0x17, 0x15 ), # AL=0x7d, AF=0 CF=1
+ ( 0x77, 0x14 ), # AL=0x7d, AF=1 CF=0
+ ( 0x17, 0x15 ), # AL=0x7d, AF=1 CF=1
+ ( 0x78, 0x14 ), # AL=0x7e, AF=0 CF=0
+ ( 0x18, 0x15 ), # AL=0x7e, AF=0 CF=1
+ ( 0x78, 0x14 ), # AL=0x7e, AF=1 CF=0
+ ( 0x18, 0x15 ), # AL=0x7e, AF=1 CF=1
+ ( 0x79, 0x10 ), # AL=0x7f, AF=0 CF=0
+ ( 0x19, 0x11 ), # AL=0x7f, AF=0 CF=1
+ ( 0x79, 0x10 ), # AL=0x7f, AF=1 CF=0
+ ( 0x19, 0x11 ), # AL=0x7f, AF=1 CF=1
+ ( 0x80, 0x80 ), # AL=0x80, AF=0 CF=0
+ ( 0x20, 0x01 ), # AL=0x80, AF=0 CF=1
+ ( 0x7a, 0x10 ), # AL=0x80, AF=1 CF=0
+ ( 0x1a, 0x11 ), # AL=0x80, AF=1 CF=1
+ ( 0x81, 0x84 ), # AL=0x81, AF=0 CF=0
+ ( 0x21, 0x05 ), # AL=0x81, AF=0 CF=1
+ ( 0x7b, 0x14 ), # AL=0x81, AF=1 CF=0
+ ( 0x1b, 0x15 ), # AL=0x81, AF=1 CF=1
+ ( 0x82, 0x84 ), # AL=0x82, AF=0 CF=0
+ ( 0x22, 0x05 ), # AL=0x82, AF=0 CF=1
+ ( 0x7c, 0x10 ), # AL=0x82, AF=1 CF=0
+ ( 0x1c, 0x11 ), # AL=0x82, AF=1 CF=1
+ ( 0x83, 0x80 ), # AL=0x83, AF=0 CF=0
+ ( 0x23, 0x01 ), # AL=0x83, AF=0 CF=1
+ ( 0x7d, 0x14 ), # AL=0x83, AF=1 CF=0
+ ( 0x1d, 0x15 ), # AL=0x83, AF=1 CF=1
+ ( 0x84, 0x84 ), # AL=0x84, AF=0 CF=0
+ ( 0x24, 0x05 ), # AL=0x84, AF=0 CF=1
+ ( 0x7e, 0x14 ), # AL=0x84, AF=1 CF=0
+ ( 0x1e, 0x15 ), # AL=0x84, AF=1 CF=1
+ ( 0x85, 0x80 ), # AL=0x85, AF=0 CF=0
+ ( 0x25, 0x01 ), # AL=0x85, AF=0 CF=1
+ ( 0x7f, 0x10 ), # AL=0x85, AF=1 CF=0
+ ( 0x1f, 0x11 ), # AL=0x85, AF=1 CF=1
+ ( 0x86, 0x80 ), # AL=0x86, AF=0 CF=0
+ ( 0x26, 0x01 ), # AL=0x86, AF=0 CF=1
+ ( 0x80, 0x90 ), # AL=0x86, AF=1 CF=0
+ ( 0x20, 0x11 ), # AL=0x86, AF=1 CF=1
+ ( 0x87, 0x84 ), # AL=0x87, AF=0 CF=0
+ ( 0x27, 0x05 ), # AL=0x87, AF=0 CF=1
+ ( 0x81, 0x94 ), # AL=0x87, AF=1 CF=0
+ ( 0x21, 0x15 ), # AL=0x87, AF=1 CF=1
+ ( 0x88, 0x84 ), # AL=0x88, AF=0 CF=0
+ ( 0x28, 0x05 ), # AL=0x88, AF=0 CF=1
+ ( 0x82, 0x94 ), # AL=0x88, AF=1 CF=0
+ ( 0x22, 0x15 ), # AL=0x88, AF=1 CF=1
+ ( 0x89, 0x80 ), # AL=0x89, AF=0 CF=0
+ ( 0x29, 0x01 ), # AL=0x89, AF=0 CF=1
+ ( 0x83, 0x90 ), # AL=0x89, AF=1 CF=0
+ ( 0x23, 0x11 ), # AL=0x89, AF=1 CF=1
+ ( 0x84, 0x94 ), # AL=0x8a, AF=0 CF=0
+ ( 0x24, 0x15 ), # AL=0x8a, AF=0 CF=1
+ ( 0x84, 0x94 ), # AL=0x8a, AF=1 CF=0
+ ( 0x24, 0x15 ), # AL=0x8a, AF=1 CF=1
+ ( 0x85, 0x90 ), # AL=0x8b, AF=0 CF=0
+ ( 0x25, 0x11 ), # AL=0x8b, AF=0 CF=1
+ ( 0x85, 0x90 ), # AL=0x8b, AF=1 CF=0
+ ( 0x25, 0x11 ), # AL=0x8b, AF=1 CF=1
+ ( 0x86, 0x90 ), # AL=0x8c, AF=0 CF=0
+ ( 0x26, 0x11 ), # AL=0x8c, AF=0 CF=1
+ ( 0x86, 0x90 ), # AL=0x8c, AF=1 CF=0
+ ( 0x26, 0x11 ), # AL=0x8c, AF=1 CF=1
+ ( 0x87, 0x94 ), # AL=0x8d, AF=0 CF=0
+ ( 0x27, 0x15 ), # AL=0x8d, AF=0 CF=1
+ ( 0x87, 0x94 ), # AL=0x8d, AF=1 CF=0
+ ( 0x27, 0x15 ), # AL=0x8d, AF=1 CF=1
+ ( 0x88, 0x94 ), # AL=0x8e, AF=0 CF=0
+ ( 0x28, 0x15 ), # AL=0x8e, AF=0 CF=1
+ ( 0x88, 0x94 ), # AL=0x8e, AF=1 CF=0
+ ( 0x28, 0x15 ), # AL=0x8e, AF=1 CF=1
+ ( 0x89, 0x90 ), # AL=0x8f, AF=0 CF=0
+ ( 0x29, 0x11 ), # AL=0x8f, AF=0 CF=1
+ ( 0x89, 0x90 ), # AL=0x8f, AF=1 CF=0
+ ( 0x29, 0x11 ), # AL=0x8f, AF=1 CF=1
+ ( 0x90, 0x84 ), # AL=0x90, AF=0 CF=0
+ ( 0x30, 0x05 ), # AL=0x90, AF=0 CF=1
+ ( 0x8a, 0x90 ), # AL=0x90, AF=1 CF=0
+ ( 0x2a, 0x11 ), # AL=0x90, AF=1 CF=1
+ ( 0x91, 0x80 ), # AL=0x91, AF=0 CF=0
+ ( 0x31, 0x01 ), # AL=0x91, AF=0 CF=1
+ ( 0x8b, 0x94 ), # AL=0x91, AF=1 CF=0
+ ( 0x2b, 0x15 ), # AL=0x91, AF=1 CF=1
+ ( 0x92, 0x80 ), # AL=0x92, AF=0 CF=0
+ ( 0x32, 0x01 ), # AL=0x92, AF=0 CF=1
+ ( 0x8c, 0x90 ), # AL=0x92, AF=1 CF=0
+ ( 0x2c, 0x11 ), # AL=0x92, AF=1 CF=1
+ ( 0x93, 0x84 ), # AL=0x93, AF=0 CF=0
+ ( 0x33, 0x05 ), # AL=0x93, AF=0 CF=1
+ ( 0x8d, 0x94 ), # AL=0x93, AF=1 CF=0
+ ( 0x2d, 0x15 ), # AL=0x93, AF=1 CF=1
+ ( 0x94, 0x80 ), # AL=0x94, AF=0 CF=0
+ ( 0x34, 0x01 ), # AL=0x94, AF=0 CF=1
+ ( 0x8e, 0x94 ), # AL=0x94, AF=1 CF=0
+ ( 0x2e, 0x15 ), # AL=0x94, AF=1 CF=1
+ ( 0x95, 0x84 ), # AL=0x95, AF=0 CF=0
+ ( 0x35, 0x05 ), # AL=0x95, AF=0 CF=1
+ ( 0x8f, 0x90 ), # AL=0x95, AF=1 CF=0
+ ( 0x2f, 0x11 ), # AL=0x95, AF=1 CF=1
+ ( 0x96, 0x84 ), # AL=0x96, AF=0 CF=0
+ ( 0x36, 0x05 ), # AL=0x96, AF=0 CF=1
+ ( 0x90, 0x94 ), # AL=0x96, AF=1 CF=0
+ ( 0x30, 0x15 ), # AL=0x96, AF=1 CF=1
+ ( 0x97, 0x80 ), # AL=0x97, AF=0 CF=0
+ ( 0x37, 0x01 ), # AL=0x97, AF=0 CF=1
+ ( 0x91, 0x90 ), # AL=0x97, AF=1 CF=0
+ ( 0x31, 0x11 ), # AL=0x97, AF=1 CF=1
+ ( 0x98, 0x80 ), # AL=0x98, AF=0 CF=0
+ ( 0x38, 0x01 ), # AL=0x98, AF=0 CF=1
+ ( 0x92, 0x90 ), # AL=0x98, AF=1 CF=0
+ ( 0x32, 0x11 ), # AL=0x98, AF=1 CF=1
+ ( 0x99, 0x84 ), # AL=0x99, AF=0 CF=0
+ ( 0x39, 0x05 ), # AL=0x99, AF=0 CF=1
+ ( 0x93, 0x94 ), # AL=0x99, AF=1 CF=0
+ ( 0x33, 0x15 ), # AL=0x99, AF=1 CF=1
+ ( 0x34, 0x11 ), # AL=0x9a, AF=0 CF=0
+ ( 0x34, 0x11 ), # AL=0x9a, AF=0 CF=1
+ ( 0x34, 0x11 ), # AL=0x9a, AF=1 CF=0
+ ( 0x34, 0x11 ), # AL=0x9a, AF=1 CF=1
+ ( 0x35, 0x15 ), # AL=0x9b, AF=0 CF=0
+ ( 0x35, 0x15 ), # AL=0x9b, AF=0 CF=1
+ ( 0x35, 0x15 ), # AL=0x9b, AF=1 CF=0
+ ( 0x35, 0x15 ), # AL=0x9b, AF=1 CF=1
+ ( 0x36, 0x15 ), # AL=0x9c, AF=0 CF=0
+ ( 0x36, 0x15 ), # AL=0x9c, AF=0 CF=1
+ ( 0x36, 0x15 ), # AL=0x9c, AF=1 CF=0
+ ( 0x36, 0x15 ), # AL=0x9c, AF=1 CF=1
+ ( 0x37, 0x11 ), # AL=0x9d, AF=0 CF=0
+ ( 0x37, 0x11 ), # AL=0x9d, AF=0 CF=1
+ ( 0x37, 0x11 ), # AL=0x9d, AF=1 CF=0
+ ( 0x37, 0x11 ), # AL=0x9d, AF=1 CF=1
+ ( 0x38, 0x11 ), # AL=0x9e, AF=0 CF=0
+ ( 0x38, 0x11 ), # AL=0x9e, AF=0 CF=1
+ ( 0x38, 0x11 ), # AL=0x9e, AF=1 CF=0
+ ( 0x38, 0x11 ), # AL=0x9e, AF=1 CF=1
+ ( 0x39, 0x15 ), # AL=0x9f, AF=0 CF=0
+ ( 0x39, 0x15 ), # AL=0x9f, AF=0 CF=1
+ ( 0x39, 0x15 ), # AL=0x9f, AF=1 CF=0
+ ( 0x39, 0x15 ), # AL=0x9f, AF=1 CF=1
+ ( 0x40, 0x01 ), # AL=0xa0, AF=0 CF=0
+ ( 0x40, 0x01 ), # AL=0xa0, AF=0 CF=1
+ ( 0x3a, 0x15 ), # AL=0xa0, AF=1 CF=0
+ ( 0x3a, 0x15 ), # AL=0xa0, AF=1 CF=1
+ ( 0x41, 0x05 ), # AL=0xa1, AF=0 CF=0
+ ( 0x41, 0x05 ), # AL=0xa1, AF=0 CF=1
+ ( 0x3b, 0x11 ), # AL=0xa1, AF=1 CF=0
+ ( 0x3b, 0x11 ), # AL=0xa1, AF=1 CF=1
+ ( 0x42, 0x05 ), # AL=0xa2, AF=0 CF=0
+ ( 0x42, 0x05 ), # AL=0xa2, AF=0 CF=1
+ ( 0x3c, 0x15 ), # AL=0xa2, AF=1 CF=0
+ ( 0x3c, 0x15 ), # AL=0xa2, AF=1 CF=1
+ ( 0x43, 0x01 ), # AL=0xa3, AF=0 CF=0
+ ( 0x43, 0x01 ), # AL=0xa3, AF=0 CF=1
+ ( 0x3d, 0x11 ), # AL=0xa3, AF=1 CF=0
+ ( 0x3d, 0x11 ), # AL=0xa3, AF=1 CF=1
+ ( 0x44, 0x05 ), # AL=0xa4, AF=0 CF=0
+ ( 0x44, 0x05 ), # AL=0xa4, AF=0 CF=1
+ ( 0x3e, 0x11 ), # AL=0xa4, AF=1 CF=0
+ ( 0x3e, 0x11 ), # AL=0xa4, AF=1 CF=1
+ ( 0x45, 0x01 ), # AL=0xa5, AF=0 CF=0
+ ( 0x45, 0x01 ), # AL=0xa5, AF=0 CF=1
+ ( 0x3f, 0x15 ), # AL=0xa5, AF=1 CF=0
+ ( 0x3f, 0x15 ), # AL=0xa5, AF=1 CF=1
+ ( 0x46, 0x01 ), # AL=0xa6, AF=0 CF=0
+ ( 0x46, 0x01 ), # AL=0xa6, AF=0 CF=1
+ ( 0x40, 0x11 ), # AL=0xa6, AF=1 CF=0
+ ( 0x40, 0x11 ), # AL=0xa6, AF=1 CF=1
+ ( 0x47, 0x05 ), # AL=0xa7, AF=0 CF=0
+ ( 0x47, 0x05 ), # AL=0xa7, AF=0 CF=1
+ ( 0x41, 0x15 ), # AL=0xa7, AF=1 CF=0
+ ( 0x41, 0x15 ), # AL=0xa7, AF=1 CF=1
+ ( 0x48, 0x05 ), # AL=0xa8, AF=0 CF=0
+ ( 0x48, 0x05 ), # AL=0xa8, AF=0 CF=1
+ ( 0x42, 0x15 ), # AL=0xa8, AF=1 CF=0
+ ( 0x42, 0x15 ), # AL=0xa8, AF=1 CF=1
+ ( 0x49, 0x01 ), # AL=0xa9, AF=0 CF=0
+ ( 0x49, 0x01 ), # AL=0xa9, AF=0 CF=1
+ ( 0x43, 0x11 ), # AL=0xa9, AF=1 CF=0
+ ( 0x43, 0x11 ), # AL=0xa9, AF=1 CF=1
+ ( 0x44, 0x15 ), # AL=0xaa, AF=0 CF=0
+ ( 0x44, 0x15 ), # AL=0xaa, AF=0 CF=1
+ ( 0x44, 0x15 ), # AL=0xaa, AF=1 CF=0
+ ( 0x44, 0x15 ), # AL=0xaa, AF=1 CF=1
+ ( 0x45, 0x11 ), # AL=0xab, AF=0 CF=0
+ ( 0x45, 0x11 ), # AL=0xab, AF=0 CF=1
+ ( 0x45, 0x11 ), # AL=0xab, AF=1 CF=0
+ ( 0x45, 0x11 ), # AL=0xab, AF=1 CF=1
+ ( 0x46, 0x11 ), # AL=0xac, AF=0 CF=0
+ ( 0x46, 0x11 ), # AL=0xac, AF=0 CF=1
+ ( 0x46, 0x11 ), # AL=0xac, AF=1 CF=0
+ ( 0x46, 0x11 ), # AL=0xac, AF=1 CF=1
+ ( 0x47, 0x15 ), # AL=0xad, AF=0 CF=0
+ ( 0x47, 0x15 ), # AL=0xad, AF=0 CF=1
+ ( 0x47, 0x15 ), # AL=0xad, AF=1 CF=0
+ ( 0x47, 0x15 ), # AL=0xad, AF=1 CF=1
+ ( 0x48, 0x15 ), # AL=0xae, AF=0 CF=0
+ ( 0x48, 0x15 ), # AL=0xae, AF=0 CF=1
+ ( 0x48, 0x15 ), # AL=0xae, AF=1 CF=0
+ ( 0x48, 0x15 ), # AL=0xae, AF=1 CF=1
+ ( 0x49, 0x11 ), # AL=0xaf, AF=0 CF=0
+ ( 0x49, 0x11 ), # AL=0xaf, AF=0 CF=1
+ ( 0x49, 0x11 ), # AL=0xaf, AF=1 CF=0
+ ( 0x49, 0x11 ), # AL=0xaf, AF=1 CF=1
+ ( 0x50, 0x05 ), # AL=0xb0, AF=0 CF=0
+ ( 0x50, 0x05 ), # AL=0xb0, AF=0 CF=1
+ ( 0x4a, 0x11 ), # AL=0xb0, AF=1 CF=0
+ ( 0x4a, 0x11 ), # AL=0xb0, AF=1 CF=1
+ ( 0x51, 0x01 ), # AL=0xb1, AF=0 CF=0
+ ( 0x51, 0x01 ), # AL=0xb1, AF=0 CF=1
+ ( 0x4b, 0x15 ), # AL=0xb1, AF=1 CF=0
+ ( 0x4b, 0x15 ), # AL=0xb1, AF=1 CF=1
+ ( 0x52, 0x01 ), # AL=0xb2, AF=0 CF=0
+ ( 0x52, 0x01 ), # AL=0xb2, AF=0 CF=1
+ ( 0x4c, 0x11 ), # AL=0xb2, AF=1 CF=0
+ ( 0x4c, 0x11 ), # AL=0xb2, AF=1 CF=1
+ ( 0x53, 0x05 ), # AL=0xb3, AF=0 CF=0
+ ( 0x53, 0x05 ), # AL=0xb3, AF=0 CF=1
+ ( 0x4d, 0x15 ), # AL=0xb3, AF=1 CF=0
+ ( 0x4d, 0x15 ), # AL=0xb3, AF=1 CF=1
+ ( 0x54, 0x01 ), # AL=0xb4, AF=0 CF=0
+ ( 0x54, 0x01 ), # AL=0xb4, AF=0 CF=1
+ ( 0x4e, 0x15 ), # AL=0xb4, AF=1 CF=0
+ ( 0x4e, 0x15 ), # AL=0xb4, AF=1 CF=1
+ ( 0x55, 0x05 ), # AL=0xb5, AF=0 CF=0
+ ( 0x55, 0x05 ), # AL=0xb5, AF=0 CF=1
+ ( 0x4f, 0x11 ), # AL=0xb5, AF=1 CF=0
+ ( 0x4f, 0x11 ), # AL=0xb5, AF=1 CF=1
+ ( 0x56, 0x05 ), # AL=0xb6, AF=0 CF=0
+ ( 0x56, 0x05 ), # AL=0xb6, AF=0 CF=1
+ ( 0x50, 0x15 ), # AL=0xb6, AF=1 CF=0
+ ( 0x50, 0x15 ), # AL=0xb6, AF=1 CF=1
+ ( 0x57, 0x01 ), # AL=0xb7, AF=0 CF=0
+ ( 0x57, 0x01 ), # AL=0xb7, AF=0 CF=1
+ ( 0x51, 0x11 ), # AL=0xb7, AF=1 CF=0
+ ( 0x51, 0x11 ), # AL=0xb7, AF=1 CF=1
+ ( 0x58, 0x01 ), # AL=0xb8, AF=0 CF=0
+ ( 0x58, 0x01 ), # AL=0xb8, AF=0 CF=1
+ ( 0x52, 0x11 ), # AL=0xb8, AF=1 CF=0
+ ( 0x52, 0x11 ), # AL=0xb8, AF=1 CF=1
+ ( 0x59, 0x05 ), # AL=0xb9, AF=0 CF=0
+ ( 0x59, 0x05 ), # AL=0xb9, AF=0 CF=1
+ ( 0x53, 0x15 ), # AL=0xb9, AF=1 CF=0
+ ( 0x53, 0x15 ), # AL=0xb9, AF=1 CF=1
+ ( 0x54, 0x11 ), # AL=0xba, AF=0 CF=0
+ ( 0x54, 0x11 ), # AL=0xba, AF=0 CF=1
+ ( 0x54, 0x11 ), # AL=0xba, AF=1 CF=0
+ ( 0x54, 0x11 ), # AL=0xba, AF=1 CF=1
+ ( 0x55, 0x15 ), # AL=0xbb, AF=0 CF=0
+ ( 0x55, 0x15 ), # AL=0xbb, AF=0 CF=1
+ ( 0x55, 0x15 ), # AL=0xbb, AF=1 CF=0
+ ( 0x55, 0x15 ), # AL=0xbb, AF=1 CF=1
+ ( 0x56, 0x15 ), # AL=0xbc, AF=0 CF=0
+ ( 0x56, 0x15 ), # AL=0xbc, AF=0 CF=1
+ ( 0x56, 0x15 ), # AL=0xbc, AF=1 CF=0
+ ( 0x56, 0x15 ), # AL=0xbc, AF=1 CF=1
+ ( 0x57, 0x11 ), # AL=0xbd, AF=0 CF=0
+ ( 0x57, 0x11 ), # AL=0xbd, AF=0 CF=1
+ ( 0x57, 0x11 ), # AL=0xbd, AF=1 CF=0
+ ( 0x57, 0x11 ), # AL=0xbd, AF=1 CF=1
+ ( 0x58, 0x11 ), # AL=0xbe, AF=0 CF=0
+ ( 0x58, 0x11 ), # AL=0xbe, AF=0 CF=1
+ ( 0x58, 0x11 ), # AL=0xbe, AF=1 CF=0
+ ( 0x58, 0x11 ), # AL=0xbe, AF=1 CF=1
+ ( 0x59, 0x15 ), # AL=0xbf, AF=0 CF=0
+ ( 0x59, 0x15 ), # AL=0xbf, AF=0 CF=1
+ ( 0x59, 0x15 ), # AL=0xbf, AF=1 CF=0
+ ( 0x59, 0x15 ), # AL=0xbf, AF=1 CF=1
+ ( 0x60, 0x05 ), # AL=0xc0, AF=0 CF=0
+ ( 0x60, 0x05 ), # AL=0xc0, AF=0 CF=1
+ ( 0x5a, 0x15 ), # AL=0xc0, AF=1 CF=0
+ ( 0x5a, 0x15 ), # AL=0xc0, AF=1 CF=1
+ ( 0x61, 0x01 ), # AL=0xc1, AF=0 CF=0
+ ( 0x61, 0x01 ), # AL=0xc1, AF=0 CF=1
+ ( 0x5b, 0x11 ), # AL=0xc1, AF=1 CF=0
+ ( 0x5b, 0x11 ), # AL=0xc1, AF=1 CF=1
+ ( 0x62, 0x01 ), # AL=0xc2, AF=0 CF=0
+ ( 0x62, 0x01 ), # AL=0xc2, AF=0 CF=1
+ ( 0x5c, 0x15 ), # AL=0xc2, AF=1 CF=0
+ ( 0x5c, 0x15 ), # AL=0xc2, AF=1 CF=1
+ ( 0x63, 0x05 ), # AL=0xc3, AF=0 CF=0
+ ( 0x63, 0x05 ), # AL=0xc3, AF=0 CF=1
+ ( 0x5d, 0x11 ), # AL=0xc3, AF=1 CF=0
+ ( 0x5d, 0x11 ), # AL=0xc3, AF=1 CF=1
+ ( 0x64, 0x01 ), # AL=0xc4, AF=0 CF=0
+ ( 0x64, 0x01 ), # AL=0xc4, AF=0 CF=1
+ ( 0x5e, 0x11 ), # AL=0xc4, AF=1 CF=0
+ ( 0x5e, 0x11 ), # AL=0xc4, AF=1 CF=1
+ ( 0x65, 0x05 ), # AL=0xc5, AF=0 CF=0
+ ( 0x65, 0x05 ), # AL=0xc5, AF=0 CF=1
+ ( 0x5f, 0x15 ), # AL=0xc5, AF=1 CF=0
+ ( 0x5f, 0x15 ), # AL=0xc5, AF=1 CF=1
+ ( 0x66, 0x05 ), # AL=0xc6, AF=0 CF=0
+ ( 0x66, 0x05 ), # AL=0xc6, AF=0 CF=1
+ ( 0x60, 0x15 ), # AL=0xc6, AF=1 CF=0
+ ( 0x60, 0x15 ), # AL=0xc6, AF=1 CF=1
+ ( 0x67, 0x01 ), # AL=0xc7, AF=0 CF=0
+ ( 0x67, 0x01 ), # AL=0xc7, AF=0 CF=1
+ ( 0x61, 0x11 ), # AL=0xc7, AF=1 CF=0
+ ( 0x61, 0x11 ), # AL=0xc7, AF=1 CF=1
+ ( 0x68, 0x01 ), # AL=0xc8, AF=0 CF=0
+ ( 0x68, 0x01 ), # AL=0xc8, AF=0 CF=1
+ ( 0x62, 0x11 ), # AL=0xc8, AF=1 CF=0
+ ( 0x62, 0x11 ), # AL=0xc8, AF=1 CF=1
+ ( 0x69, 0x05 ), # AL=0xc9, AF=0 CF=0
+ ( 0x69, 0x05 ), # AL=0xc9, AF=0 CF=1
+ ( 0x63, 0x15 ), # AL=0xc9, AF=1 CF=0
+ ( 0x63, 0x15 ), # AL=0xc9, AF=1 CF=1
+ ( 0x64, 0x11 ), # AL=0xca, AF=0 CF=0
+ ( 0x64, 0x11 ), # AL=0xca, AF=0 CF=1
+ ( 0x64, 0x11 ), # AL=0xca, AF=1 CF=0
+ ( 0x64, 0x11 ), # AL=0xca, AF=1 CF=1
+ ( 0x65, 0x15 ), # AL=0xcb, AF=0 CF=0
+ ( 0x65, 0x15 ), # AL=0xcb, AF=0 CF=1
+ ( 0x65, 0x15 ), # AL=0xcb, AF=1 CF=0
+ ( 0x65, 0x15 ), # AL=0xcb, AF=1 CF=1
+ ( 0x66, 0x15 ), # AL=0xcc, AF=0 CF=0
+ ( 0x66, 0x15 ), # AL=0xcc, AF=0 CF=1
+ ( 0x66, 0x15 ), # AL=0xcc, AF=1 CF=0
+ ( 0x66, 0x15 ), # AL=0xcc, AF=1 CF=1
+ ( 0x67, 0x11 ), # AL=0xcd, AF=0 CF=0
+ ( 0x67, 0x11 ), # AL=0xcd, AF=0 CF=1
+ ( 0x67, 0x11 ), # AL=0xcd, AF=1 CF=0
+ ( 0x67, 0x11 ), # AL=0xcd, AF=1 CF=1
+ ( 0x68, 0x11 ), # AL=0xce, AF=0 CF=0
+ ( 0x68, 0x11 ), # AL=0xce, AF=0 CF=1
+ ( 0x68, 0x11 ), # AL=0xce, AF=1 CF=0
+ ( 0x68, 0x11 ), # AL=0xce, AF=1 CF=1
+ ( 0x69, 0x15 ), # AL=0xcf, AF=0 CF=0
+ ( 0x69, 0x15 ), # AL=0xcf, AF=0 CF=1
+ ( 0x69, 0x15 ), # AL=0xcf, AF=1 CF=0
+ ( 0x69, 0x15 ), # AL=0xcf, AF=1 CF=1
+ ( 0x70, 0x01 ), # AL=0xd0, AF=0 CF=0
+ ( 0x70, 0x01 ), # AL=0xd0, AF=0 CF=1
+ ( 0x6a, 0x15 ), # AL=0xd0, AF=1 CF=0
+ ( 0x6a, 0x15 ), # AL=0xd0, AF=1 CF=1
+ ( 0x71, 0x05 ), # AL=0xd1, AF=0 CF=0
+ ( 0x71, 0x05 ), # AL=0xd1, AF=0 CF=1
+ ( 0x6b, 0x11 ), # AL=0xd1, AF=1 CF=0
+ ( 0x6b, 0x11 ), # AL=0xd1, AF=1 CF=1
+ ( 0x72, 0x05 ), # AL=0xd2, AF=0 CF=0
+ ( 0x72, 0x05 ), # AL=0xd2, AF=0 CF=1
+ ( 0x6c, 0x15 ), # AL=0xd2, AF=1 CF=0
+ ( 0x6c, 0x15 ), # AL=0xd2, AF=1 CF=1
+ ( 0x73, 0x01 ), # AL=0xd3, AF=0 CF=0
+ ( 0x73, 0x01 ), # AL=0xd3, AF=0 CF=1
+ ( 0x6d, 0x11 ), # AL=0xd3, AF=1 CF=0
+ ( 0x6d, 0x11 ), # AL=0xd3, AF=1 CF=1
+ ( 0x74, 0x05 ), # AL=0xd4, AF=0 CF=0
+ ( 0x74, 0x05 ), # AL=0xd4, AF=0 CF=1
+ ( 0x6e, 0x11 ), # AL=0xd4, AF=1 CF=0
+ ( 0x6e, 0x11 ), # AL=0xd4, AF=1 CF=1
+ ( 0x75, 0x01 ), # AL=0xd5, AF=0 CF=0
+ ( 0x75, 0x01 ), # AL=0xd5, AF=0 CF=1
+ ( 0x6f, 0x15 ), # AL=0xd5, AF=1 CF=0
+ ( 0x6f, 0x15 ), # AL=0xd5, AF=1 CF=1
+ ( 0x76, 0x01 ), # AL=0xd6, AF=0 CF=0
+ ( 0x76, 0x01 ), # AL=0xd6, AF=0 CF=1
+ ( 0x70, 0x11 ), # AL=0xd6, AF=1 CF=0
+ ( 0x70, 0x11 ), # AL=0xd6, AF=1 CF=1
+ ( 0x77, 0x05 ), # AL=0xd7, AF=0 CF=0
+ ( 0x77, 0x05 ), # AL=0xd7, AF=0 CF=1
+ ( 0x71, 0x15 ), # AL=0xd7, AF=1 CF=0
+ ( 0x71, 0x15 ), # AL=0xd7, AF=1 CF=1
+ ( 0x78, 0x05 ), # AL=0xd8, AF=0 CF=0
+ ( 0x78, 0x05 ), # AL=0xd8, AF=0 CF=1
+ ( 0x72, 0x15 ), # AL=0xd8, AF=1 CF=0
+ ( 0x72, 0x15 ), # AL=0xd8, AF=1 CF=1
+ ( 0x79, 0x01 ), # AL=0xd9, AF=0 CF=0
+ ( 0x79, 0x01 ), # AL=0xd9, AF=0 CF=1
+ ( 0x73, 0x11 ), # AL=0xd9, AF=1 CF=0
+ ( 0x73, 0x11 ), # AL=0xd9, AF=1 CF=1
+ ( 0x74, 0x15 ), # AL=0xda, AF=0 CF=0
+ ( 0x74, 0x15 ), # AL=0xda, AF=0 CF=1
+ ( 0x74, 0x15 ), # AL=0xda, AF=1 CF=0
+ ( 0x74, 0x15 ), # AL=0xda, AF=1 CF=1
+ ( 0x75, 0x11 ), # AL=0xdb, AF=0 CF=0
+ ( 0x75, 0x11 ), # AL=0xdb, AF=0 CF=1
+ ( 0x75, 0x11 ), # AL=0xdb, AF=1 CF=0
+ ( 0x75, 0x11 ), # AL=0xdb, AF=1 CF=1
+ ( 0x76, 0x11 ), # AL=0xdc, AF=0 CF=0
+ ( 0x76, 0x11 ), # AL=0xdc, AF=0 CF=1
+ ( 0x76, 0x11 ), # AL=0xdc, AF=1 CF=0
+ ( 0x76, 0x11 ), # AL=0xdc, AF=1 CF=1
+ ( 0x77, 0x15 ), # AL=0xdd, AF=0 CF=0
+ ( 0x77, 0x15 ), # AL=0xdd, AF=0 CF=1
+ ( 0x77, 0x15 ), # AL=0xdd, AF=1 CF=0
+ ( 0x77, 0x15 ), # AL=0xdd, AF=1 CF=1
+ ( 0x78, 0x15 ), # AL=0xde, AF=0 CF=0
+ ( 0x78, 0x15 ), # AL=0xde, AF=0 CF=1
+ ( 0x78, 0x15 ), # AL=0xde, AF=1 CF=0
+ ( 0x78, 0x15 ), # AL=0xde, AF=1 CF=1
+ ( 0x79, 0x11 ), # AL=0xdf, AF=0 CF=0
+ ( 0x79, 0x11 ), # AL=0xdf, AF=0 CF=1
+ ( 0x79, 0x11 ), # AL=0xdf, AF=1 CF=0
+ ( 0x79, 0x11 ), # AL=0xdf, AF=1 CF=1
+ ( 0x80, 0x81 ), # AL=0xe0, AF=0 CF=0
+ ( 0x80, 0x81 ), # AL=0xe0, AF=0 CF=1
+ ( 0x7a, 0x11 ), # AL=0xe0, AF=1 CF=0
+ ( 0x7a, 0x11 ), # AL=0xe0, AF=1 CF=1
+ ( 0x81, 0x85 ), # AL=0xe1, AF=0 CF=0
+ ( 0x81, 0x85 ), # AL=0xe1, AF=0 CF=1
+ ( 0x7b, 0x15 ), # AL=0xe1, AF=1 CF=0
+ ( 0x7b, 0x15 ), # AL=0xe1, AF=1 CF=1
+ ( 0x82, 0x85 ), # AL=0xe2, AF=0 CF=0
+ ( 0x82, 0x85 ), # AL=0xe2, AF=0 CF=1
+ ( 0x7c, 0x11 ), # AL=0xe2, AF=1 CF=0
+ ( 0x7c, 0x11 ), # AL=0xe2, AF=1 CF=1
+ ( 0x83, 0x81 ), # AL=0xe3, AF=0 CF=0
+ ( 0x83, 0x81 ), # AL=0xe3, AF=0 CF=1
+ ( 0x7d, 0x15 ), # AL=0xe3, AF=1 CF=0
+ ( 0x7d, 0x15 ), # AL=0xe3, AF=1 CF=1
+ ( 0x84, 0x85 ), # AL=0xe4, AF=0 CF=0
+ ( 0x84, 0x85 ), # AL=0xe4, AF=0 CF=1
+ ( 0x7e, 0x15 ), # AL=0xe4, AF=1 CF=0
+ ( 0x7e, 0x15 ), # AL=0xe4, AF=1 CF=1
+ ( 0x85, 0x81 ), # AL=0xe5, AF=0 CF=0
+ ( 0x85, 0x81 ), # AL=0xe5, AF=0 CF=1
+ ( 0x7f, 0x11 ), # AL=0xe5, AF=1 CF=0
+ ( 0x7f, 0x11 ), # AL=0xe5, AF=1 CF=1
+ ( 0x86, 0x81 ), # AL=0xe6, AF=0 CF=0
+ ( 0x86, 0x81 ), # AL=0xe6, AF=0 CF=1
+ ( 0x80, 0x91 ), # AL=0xe6, AF=1 CF=0
+ ( 0x80, 0x91 ), # AL=0xe6, AF=1 CF=1
+ ( 0x87, 0x85 ), # AL=0xe7, AF=0 CF=0
+ ( 0x87, 0x85 ), # AL=0xe7, AF=0 CF=1
+ ( 0x81, 0x95 ), # AL=0xe7, AF=1 CF=0
+ ( 0x81, 0x95 ), # AL=0xe7, AF=1 CF=1
+ ( 0x88, 0x85 ), # AL=0xe8, AF=0 CF=0
+ ( 0x88, 0x85 ), # AL=0xe8, AF=0 CF=1
+ ( 0x82, 0x95 ), # AL=0xe8, AF=1 CF=0
+ ( 0x82, 0x95 ), # AL=0xe8, AF=1 CF=1
+ ( 0x89, 0x81 ), # AL=0xe9, AF=0 CF=0
+ ( 0x89, 0x81 ), # AL=0xe9, AF=0 CF=1
+ ( 0x83, 0x91 ), # AL=0xe9, AF=1 CF=0
+ ( 0x83, 0x91 ), # AL=0xe9, AF=1 CF=1
+ ( 0x84, 0x95 ), # AL=0xea, AF=0 CF=0
+ ( 0x84, 0x95 ), # AL=0xea, AF=0 CF=1
+ ( 0x84, 0x95 ), # AL=0xea, AF=1 CF=0
+ ( 0x84, 0x95 ), # AL=0xea, AF=1 CF=1
+ ( 0x85, 0x91 ), # AL=0xeb, AF=0 CF=0
+ ( 0x85, 0x91 ), # AL=0xeb, AF=0 CF=1
+ ( 0x85, 0x91 ), # AL=0xeb, AF=1 CF=0
+ ( 0x85, 0x91 ), # AL=0xeb, AF=1 CF=1
+ ( 0x86, 0x91 ), # AL=0xec, AF=0 CF=0
+ ( 0x86, 0x91 ), # AL=0xec, AF=0 CF=1
+ ( 0x86, 0x91 ), # AL=0xec, AF=1 CF=0
+ ( 0x86, 0x91 ), # AL=0xec, AF=1 CF=1
+ ( 0x87, 0x95 ), # AL=0xed, AF=0 CF=0
+ ( 0x87, 0x95 ), # AL=0xed, AF=0 CF=1
+ ( 0x87, 0x95 ), # AL=0xed, AF=1 CF=0
+ ( 0x87, 0x95 ), # AL=0xed, AF=1 CF=1
+ ( 0x88, 0x95 ), # AL=0xee, AF=0 CF=0
+ ( 0x88, 0x95 ), # AL=0xee, AF=0 CF=1
+ ( 0x88, 0x95 ), # AL=0xee, AF=1 CF=0
+ ( 0x88, 0x95 ), # AL=0xee, AF=1 CF=1
+ ( 0x89, 0x91 ), # AL=0xef, AF=0 CF=0
+ ( 0x89, 0x91 ), # AL=0xef, AF=0 CF=1
+ ( 0x89, 0x91 ), # AL=0xef, AF=1 CF=0
+ ( 0x89, 0x91 ), # AL=0xef, AF=1 CF=1
+ ( 0x90, 0x85 ), # AL=0xf0, AF=0 CF=0
+ ( 0x90, 0x85 ), # AL=0xf0, AF=0 CF=1
+ ( 0x8a, 0x91 ), # AL=0xf0, AF=1 CF=0
+ ( 0x8a, 0x91 ), # AL=0xf0, AF=1 CF=1
+ ( 0x91, 0x81 ), # AL=0xf1, AF=0 CF=0
+ ( 0x91, 0x81 ), # AL=0xf1, AF=0 CF=1
+ ( 0x8b, 0x95 ), # AL=0xf1, AF=1 CF=0
+ ( 0x8b, 0x95 ), # AL=0xf1, AF=1 CF=1
+ ( 0x92, 0x81 ), # AL=0xf2, AF=0 CF=0
+ ( 0x92, 0x81 ), # AL=0xf2, AF=0 CF=1
+ ( 0x8c, 0x91 ), # AL=0xf2, AF=1 CF=0
+ ( 0x8c, 0x91 ), # AL=0xf2, AF=1 CF=1
+ ( 0x93, 0x85 ), # AL=0xf3, AF=0 CF=0
+ ( 0x93, 0x85 ), # AL=0xf3, AF=0 CF=1
+ ( 0x8d, 0x95 ), # AL=0xf3, AF=1 CF=0
+ ( 0x8d, 0x95 ), # AL=0xf3, AF=1 CF=1
+ ( 0x94, 0x81 ), # AL=0xf4, AF=0 CF=0
+ ( 0x94, 0x81 ), # AL=0xf4, AF=0 CF=1
+ ( 0x8e, 0x95 ), # AL=0xf4, AF=1 CF=0
+ ( 0x8e, 0x95 ), # AL=0xf4, AF=1 CF=1
+ ( 0x95, 0x85 ), # AL=0xf5, AF=0 CF=0
+ ( 0x95, 0x85 ), # AL=0xf5, AF=0 CF=1
+ ( 0x8f, 0x91 ), # AL=0xf5, AF=1 CF=0
+ ( 0x8f, 0x91 ), # AL=0xf5, AF=1 CF=1
+ ( 0x96, 0x85 ), # AL=0xf6, AF=0 CF=0
+ ( 0x96, 0x85 ), # AL=0xf6, AF=0 CF=1
+ ( 0x90, 0x95 ), # AL=0xf6, AF=1 CF=0
+ ( 0x90, 0x95 ), # AL=0xf6, AF=1 CF=1
+ ( 0x97, 0x81 ), # AL=0xf7, AF=0 CF=0
+ ( 0x97, 0x81 ), # AL=0xf7, AF=0 CF=1
+ ( 0x91, 0x91 ), # AL=0xf7, AF=1 CF=0
+ ( 0x91, 0x91 ), # AL=0xf7, AF=1 CF=1
+ ( 0x98, 0x81 ), # AL=0xf8, AF=0 CF=0
+ ( 0x98, 0x81 ), # AL=0xf8, AF=0 CF=1
+ ( 0x92, 0x91 ), # AL=0xf8, AF=1 CF=0
+ ( 0x92, 0x91 ), # AL=0xf8, AF=1 CF=1
+ ( 0x99, 0x85 ), # AL=0xf9, AF=0 CF=0
+ ( 0x99, 0x85 ), # AL=0xf9, AF=0 CF=1
+ ( 0x93, 0x95 ), # AL=0xf9, AF=1 CF=0
+ ( 0x93, 0x95 ), # AL=0xf9, AF=1 CF=1
+ ( 0x94, 0x91 ), # AL=0xfa, AF=0 CF=0
+ ( 0x94, 0x91 ), # AL=0xfa, AF=0 CF=1
+ ( 0x94, 0x91 ), # AL=0xfa, AF=1 CF=0
+ ( 0x94, 0x91 ), # AL=0xfa, AF=1 CF=1
+ ( 0x95, 0x95 ), # AL=0xfb, AF=0 CF=0
+ ( 0x95, 0x95 ), # AL=0xfb, AF=0 CF=1
+ ( 0x95, 0x95 ), # AL=0xfb, AF=1 CF=0
+ ( 0x95, 0x95 ), # AL=0xfb, AF=1 CF=1
+ ( 0x96, 0x95 ), # AL=0xfc, AF=0 CF=0
+ ( 0x96, 0x95 ), # AL=0xfc, AF=0 CF=1
+ ( 0x96, 0x95 ), # AL=0xfc, AF=1 CF=0
+ ( 0x96, 0x95 ), # AL=0xfc, AF=1 CF=1
+ ( 0x97, 0x91 ), # AL=0xfd, AF=0 CF=0
+ ( 0x97, 0x91 ), # AL=0xfd, AF=0 CF=1
+ ( 0x97, 0x91 ), # AL=0xfd, AF=1 CF=0
+ ( 0x97, 0x91 ), # AL=0xfd, AF=1 CF=1
+ ( 0x98, 0x91 ), # AL=0xfe, AF=0 CF=0
+ ( 0x98, 0x91 ), # AL=0xfe, AF=0 CF=1
+ ( 0x98, 0x91 ), # AL=0xfe, AF=1 CF=0
+ ( 0x98, 0x91 ), # AL=0xfe, AF=1 CF=1
+ ( 0x99, 0x95 ), # AL=0xff, AF=0 CF=0
+ ( 0x99, 0x95 ), # AL=0xff, AF=0 CF=1
+ ( 0x99, 0x95 ), # AL=0xff, AF=1 CF=0
+ ( 0x99, 0x95 ), # AL=0xff, AF=1 CF=1
+];
+
diff --git a/src/VBox/VMM/testcase/Instructions/tstVBInsTstR3.cpp b/src/VBox/VMM/testcase/Instructions/tstVBInsTstR3.cpp
new file mode 100644
index 00000000..014bfbda
--- /dev/null
+++ b/src/VBox/VMM/testcase/Instructions/tstVBInsTstR3.cpp
@@ -0,0 +1,120 @@
+/* $Id: tstVBInsTstR3.cpp $ */
+/** @file
+ * Instruction Test Environment - IPRT ring-3 driver.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <iprt/mem.h>
+#include <iprt/string.h>
+#include <iprt/test.h>
+
+#ifdef RT_OS_WINDOWS
+# define NO_LOW_MEM
+#elif defined(RT_OS_OS2) || defined(RT_OS_HAIKU)
+# define NO_LOW_MEM
+#else
+# include <sys/mman.h>
+#endif
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+#if HC_ARCH_BITS == 64
+typedef uint64_t VBINSTSTREG;
+#else
+typedef uint32_t VBINSTSTREG;
+#endif
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+RTTEST g_hTest;
+
+
+RT_C_DECLS_BEGIN
+extern void *g_pvLow16Mem4K;
+extern void *g_pvLow32Mem4K;
+DECLASM(void) TestInstrMain(void);
+
+DECLEXPORT(void) VBInsTstFailure(const char *pszMessage);
+DECLEXPORT(void) VBInsTstFailure1(const char *pszFmt, VBINSTSTREG uArg1);
+DECLEXPORT(void) VBInsTstFailure2(const char *pszFmt, VBINSTSTREG uArg1, VBINSTSTREG uArg2);
+DECLEXPORT(void) VBInsTstFailure3(const char *pszFmt, VBINSTSTREG uArg1, VBINSTSTREG uArg2, VBINSTSTREG uArg3);
+DECLEXPORT(void) VBInsTstFailure4(const char *pszFmt, VBINSTSTREG uArg1, VBINSTSTREG uArg2, VBINSTSTREG uArg3, VBINSTSTREG uArg4);
+RT_C_DECLS_END
+
+
+DECLEXPORT(void) VBInsTstFailure(const char *pszMessage)
+{
+ RTTestFailed(g_hTest, "%s", pszMessage);
+}
+
+DECLEXPORT(void) VBInsTstFailure1(const char *pszFmt, VBINSTSTREG uArg1)
+{
+ RTTestFailed(g_hTest, pszFmt, uArg1);
+}
+
+
+DECLEXPORT(void) VBInsTstFailure2(const char *pszFmt, VBINSTSTREG uArg1, VBINSTSTREG uArg2)
+{
+ RTTestFailed(g_hTest, pszFmt, uArg1, uArg2);
+}
+
+
+DECLEXPORT(void) VBInsTstFailure3(const char *pszFmt, VBINSTSTREG uArg1, VBINSTSTREG uArg2, VBINSTSTREG uArg3)
+{
+ RTTestFailed(g_hTest, pszFmt, uArg1, uArg2, uArg3);
+}
+
+
+DECLEXPORT(void) VBInsTstFailure4(const char *pszFmt, VBINSTSTREG uArg1, VBINSTSTREG uArg2, VBINSTSTREG uArg3, VBINSTSTREG uArg4)
+{
+ RTTestFailed(g_hTest, pszFmt, uArg1, uArg2, uArg3, uArg4);
+}
+
+
+
+
+int main()
+{
+ RTEXITCODE rcExit = RTTestInitAndCreate("VBInsTstR3", &g_hTest);
+ if (rcExit != RTEXITCODE_SUCCESS)
+ return rcExit;
+ RTTestBanner(g_hTest);
+
+ int rc = RTMemAllocEx(_4K, 0, RTMEMALLOCEX_FLAGS_16BIT_REACH, &g_pvLow16Mem4K);
+ if (RT_FAILURE(rc))
+ {
+ RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS, "Could not allocate low 16-bit memory (%Rrc)\n", rc);
+ g_pvLow16Mem4K = NULL;
+ }
+
+ rc = RTMemAllocEx(_4K, 0, RTMEMALLOCEX_FLAGS_32BIT_REACH, &g_pvLow32Mem4K);
+ if (RT_FAILURE(rc))
+ {
+ RTTestPrintf(g_hTest, RTTESTLVL_ALWAYS, "Could not allocate low 32-bit memory (%Rrc)\n", rc);
+ g_pvLow32Mem4K = NULL;
+ }
+
+ TestInstrMain();
+
+ return RTTestSummaryAndDestroy(g_hTest);
+}
+
diff --git a/src/VBox/VMM/testcase/Makefile.kmk b/src/VBox/VMM/testcase/Makefile.kmk
new file mode 100644
index 00000000..d7a95af9
--- /dev/null
+++ b/src/VBox/VMM/testcase/Makefile.kmk
@@ -0,0 +1,653 @@
+# $Id: Makefile.kmk $
+## @file
+# Sub-Makefile for the VMM testcases.
+#
+
+#
+# Copyright (C) 2006-2020 Oracle Corporation
+#
+# This file is part of VirtualBox Open Source Edition (OSE), as
+# available from http://www.virtualbox.org. This file is free software;
+# you can redistribute it and/or modify it under the terms of the GNU
+# General Public License (GPL) as published by the Free Software
+# Foundation, in version 2 as it comes in the "COPYING" file of the
+# VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+# hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+#
+
+SUB_DEPTH = ../../../..
+include $(KBUILD_PATH)/subheader.kmk
+
+#
+# Include sub-makefiles.
+#
+if 0 # Not ready for general consumption yet.
+ include $(PATH_SUB_CURRENT)/Instructions/Makefile.kmk
+endif
+
+#
+# Target lists.
+#
+PROGRAMS += tstVMStructSize tstAsmStructs
+ifdef VBOX_WITH_RAW_MODE
+ PROGRAMS += tstVMStructRC tstAsmStructsRC
+endif
+if !defined(VBOX_ONLY_EXTPACKS) \
+ && ( defined(VBOX_WITH_DTRACE_R3) \
+ || defined(VBOX_WITH_DTRACE_R0) \
+ || defined(VBOX_WITH_DTRACE_RC))
+PROGRAMS += tstVMStructDTrace
+INSTALLS += VMMLibDTraceStructTest
+endif
+ifndef VBOX_ONLY_EXTPACKS_USE_IMPLIBS
+ if defined(VBOX_WITH_HARDENING) && "$(KBUILD_TARGET)" == "win"
+PROGRAMS += tstGlobalConfigHardened
+DLL += tstGlobalConfig
+ else
+PROGRAMS += tstGlobalConfig
+ endif
+
+ ifdef VBOX_WITH_RAW_MODE
+ if defined(VBOX_WITH_HARDENING) && "$(KBUILD_TARGET)" == "win"
+PROGRAMS += tstVMMHardened
+DLLS += tstVMM
+ else
+PROGRAMS += tstVMM tstVMM-HM
+ endif
+ ifneq ($(KBUILD_TARGET),win)
+PROGRAMS += tstVMMFork
+ endif
+ endif
+ ifdef VBOX_WITH_TESTCASES
+ if defined(VBOX_WITH_HARDENING) && "$(KBUILD_TARGET)" == "win"
+PROGRAMS += tstCFGMHardened tstVMREQHardened tstMMHyperHeapHardened tstAnimateHardened
+DLLS += tstCFGM tstVMREQ tstMMHyperHeap tstAnimate
+ else
+PROGRAMS += tstCFGM tstVMREQ tstMMHyperHeap tstAnimate
+ endif
+PROGRAMS += \
+ tstCompressionBenchmark \
+ tstIEMCheckMc \
+ tstSSM \
+ tstVMMR0CallHost-1 \
+ tstVMMR0CallHost-2 \
+ tstX86-FpuSaveRestore
+ ifn1of ($(KBUILD_TARGET).$(KBUILD_TARGET_ARCH), solaris.x86 solaris.amd64 win.amd64 ) ## TODO: Fix the code.
+PROGRAMS += tstX86-1
+ endif
+ ifdef VBOX_WITH_RAW_MODE
+ if defined(VBOX_WITH_HARDENING) && "$(KBUILD_TARGET)" == "win"
+PROGRAMS += tstMicroHardened
+DLLS += tstMicro
+ else
+PROGRAMS += tstMicro
+ endif
+SYSMODS += tstMicroRC
+ endif
+ ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
+ if defined(VBOX_WITH_HARDENING) && "$(KBUILD_TARGET)" == "win"
+PROGRAMS += tstPDMAsyncCompletionHardened tstPDMAsyncCompletionStressHardened
+DLLS += tstPDMAsyncCompletion tstPDMAsyncCompletionStress
+ else
+PROGRAMS += tstPDMAsyncCompletion tstPDMAsyncCompletionStress
+ endif
+ endif
+ endif # VBOX_WITH_TESTCASES
+endif # !VBOX_ONLY_EXTPACKS_USE_IMPLIBS
+
+# Where we put our temporary files (just for simplicity)
+VBOX_VMM_TESTCASE_OUT_DIR := $(PATH_TARGET)/VMM
+BLDDIRS += $(VBOX_VMM_TESTCASE_OUT_DIR)
+
+#
+# We setup two 'other' targets for executing the two structure & alignment
+# validation testcases. Perhaps a bit hackish, but extremely useful.
+#
+ifeq ($(KBUILD_TARGET),$(KBUILD_HOST))
+ ifeq ($(filter-out x86.x86 amd64.amd64 x86.amd64, $(KBUILD_TARGET_ARCH).$(KBUILD_HOST_ARCH)),)
+OTHERS += \
+ $(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructs.run \
+ $(VBOX_VMM_TESTCASE_OUT_DIR)/tstVMStructSize.run
+ endif
+endif
+
+# The normal testing pass.
+TESTING += \
+ $(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructs.run \
+ $(VBOX_VMM_TESTCASE_OUT_DIR)/tstVMStructSize.run
+
+OTHER_CLEAN += \
+ $(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructs.run \
+ $(VBOX_VMM_TESTCASE_OUT_DIR)/tstVMStructSize.run \
+ $(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructsAsm.o \
+ $(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructsAsm.o.dep \
+ $(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructsAsm.mac \
+ $(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructsAsm.mac.o \
+ $(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructsAsm.mac.lst \
+ $(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructsRC.h \
+ $(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructsHC.h \
+ $(VBOX_VMM_TESTCASE_OUT_DIR)/tstVMStructRC.h
+
+#
+# Globals
+#
+VBOX_PATH_VMM_SRC = $(PATH_ROOT)/src/VBox/VMM
+
+#
+# Targets
+#
+ifdef VBOX_WITH_RAW_MODE
+tstVMStructRC_TEMPLATE = VBoxRcExe
+tstVMStructRC_DEFS = VBOX_IN_VMM IN_VMM_RC IN_DIS IN_RT_RC VBOX_WITH_RAW_MODE $(VMM_COMMON_DEFS)
+ ifdef VBOX_WITH_R0_LOGGING
+tstVMStructRC_DEFS += VBOX_WITH_R0_LOGGING
+ endif
+ ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+tstVMStructRC_DEFS += VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+ endif
+tstVMStructRC_SOURCES = tstVMStructRC.cpp
+tstVMStructRC_INCS = \
+ $(VBOX_PATH_VMM_SRC)/include \
+ $(VBOX_PATH_VMM_SRC)/PATM
+endif
+
+tstVMStructSize_TEMPLATE= VBOXR3AUTOTST
+ifneq ($(KBUILD_TARGET),win)
+tstVMStructSize_CXXFLAGS += $(VBOX_GCC_Wno-invalid-offsetof)
+endif
+tstVMStructSize_DEFS = VBOX_IN_VMM IN_VMM_R3 IN_DIS $(VMM_COMMON_DEFS)
+ifdef VBOX_WITH_RAW_MODE
+tstVMStructSize_DEFS += VBOX_WITH_RAW_MODE
+endif
+tstVMStructSize_INCS = \
+ $(VBOX_PATH_VMM_SRC)/include \
+ $(VBOX_PATH_VMM_SRC)/PATM \
+ $(VBOX_VMM_TESTCASE_OUT_DIR)
+tstVMStructSize_SOURCES = tstVMStructSize.cpp
+ifdef VBOX_WITH_RAW_MODE
+tstVMStructSize.cpp_DEPS= $(VBOX_VMM_TESTCASE_OUT_DIR)/tstVMStructRC.h
+endif
+ifdef VBOX_WITH_R0_LOGGING
+tstVMStructSize_DEFS += VBOX_WITH_R0_LOGGING
+endif
+ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+tstVMStructSize_DEFS += VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+endif
+
+tstAsmStructs_TEMPLATE = VBOXR3AUTOTST
+ifneq ($(KBUILD_TARGET),win)
+tstAsmStructSize_CXXFLAGS += $(VBOX_GCC_Wno-invalid-offsetof)
+endif
+tstAsmStructs_DEFS = VBOX_IN_VMM IN_VMM_R3 IN_DIS $(VMM_COMMON_DEFS)
+ifdef VBOX_WITH_RAW_MODE
+tstAsmStructs_DEFS += VBOX_WITH_RAW_MODE
+endif
+ifdef VBOX_WITH_R0_LOGGING
+tstAsmStructs_DEFS += VBOX_WITH_R0_LOGGING
+endif
+ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+tstAsmStructs_DEFS += VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+endif
+tstAsmStructs_INCS = \
+ $(VBOX_PATH_VMM_SRC)/include \
+ $(VBOX_VMM_TESTCASE_OUT_DIR)
+tstAsmStructs_SOURCES = tstAsmStructs.cpp
+tstAsmStructs.cpp_DEPS = $(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructsHC.h
+
+ifdef VBOX_WITH_RAW_MODE
+tstAsmStructsRC_TEMPLATE= VBoxRcExe
+tstAsmStructsRC_DEFS = VBOX_IN_VMM IN_VMM_RC IN_DIS IN_RT_RC VBOX_WITH_RAW_MODE $(VMM_COMMON_DEFS)
+ ifdef VBOX_WITH_R0_LOGGING
+tstAsmStructsRC_DEFS += VBOX_WITH_R0_LOGGING
+ endif
+ ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+tstAsmStructsRC_DEFS += VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+ endif
+tstAsmStructsRC_INCS = \
+ $(VBOX_PATH_VMM_SRC)/include \
+ $(VBOX_VMM_TESTCASE_OUT_DIR)
+tstAsmStructsRC_SOURCES = tstAsmStructs.cpp
+tstAsmStructs.cpp_DEPS += $(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructsRC.h
+endif # VBOX_WITH_RAW_MODE
+
+
+#
+# Global config tool.
+#
+if defined(VBOX_WITH_HARDENING) && "$(KBUILD_TARGET)" == "win"
+tstGlobalConfigHardened_TEMPLATE = VBoxR3HardenedTstExe
+tstGlobalConfigHardened_NAME = tstGlobalConfig
+tstGlobalConfigHardened_DEFS = PROGRAM_NAME_STR=\"tstGlobalConfig\"
+tstGlobalConfigHardened_SOURCES = ../../HostDrivers/Support/SUPR3HardenedMainTemplateTestcase.cpp
+tstGlobalConfig_TEMPLATE = VBoxR3HardenedTstDll
+else
+tstGlobalConfig_TEMPLATE = VBOXR3TSTEXE
+endif
+tstGlobalConfig_SOURCES = tstGlobalConfig.cpp
+tstGlobalConfig_LIBS = $(LIB_RUNTIME)
+
+#
+# Testcase for checking the repurposing of the IEM instruction code.
+#
+tstIEMCheckMc_TEMPLATE = VBOXR3TSTEXE
+tstIEMCheckMc_SOURCES = tstIEMCheckMc.cpp
+tstIEMCheckMc_DEFS = $(VMM_COMMON_DEFS)
+tstIEMCheckMc_LIBS = $(LIB_RUNTIME)
+ifeq ($(KBUILD_TARGET),win)
+tstIEMCheckMc_CXXFLAGS = $(VBOX_C_CXX_FLAGS_NO_UNUSED_PARAMETERS) -wd4189 # local variable is initialized but not used.
+else
+tstIEMCheckMc_CXXFLAGS = $(VBOX_C_CXX_FLAGS_NO_UNUSED_PARAMETERS) -Wno-unused-value -Wno-unused-variable
+endif
+
+#
+# VMM heap testcase.
+#
+if defined(VBOX_WITH_HARDENING) && "$(KBUILD_TARGET)" == "win"
+tstMMHyperHeapHardened_TEMPLATE = VBoxR3HardenedTstExe
+tstMMHyperHeapHardened_NAME = tstMMHyperHeap
+tstMMHyperHeapHardened_DEFS = PROGRAM_NAME_STR=\"tstMMHyperHeap\"
+tstMMHyperHeapHardened_SOURCES = ../../HostDrivers/Support/SUPR3HardenedMainTemplateTestcase.cpp
+tstMMHyperHeap_TEMPLATE = VBoxR3HardenedTstDll
+else
+tstMMHyperHeap_TEMPLATE = VBOXR3TSTEXE
+endif
+tstMMHyperHeap_DEFS = $(VMM_COMMON_DEFS)
+tstMMHyperHeap_SOURCES = tstMMHyperHeap.cpp
+tstMMHyperHeap_LIBS = $(LIB_VMM) $(LIB_REM) $(LIB_RUNTIME)
+
+#
+# Saved state manager testcase.
+#
+tstSSM_TEMPLATE = VBOXR3TSTEXE
+tstSSM_INCS = $(VBOX_PATH_VMM_SRC)/include
+tstSSM_DEFS = $(VMM_COMMON_DEFS)
+tstSSM_SOURCES = tstSSM.cpp
+tstSSM_LIBS = $(LIB_VMM) $(LIB_REM) $(LIB_RUNTIME)
+
+#
+# VMM configuration manager tests.
+#
+if defined(VBOX_WITH_HARDENING) && "$(KBUILD_TARGET)" == "win"
+tstCFGMHardened_TEMPLATE = VBoxR3HardenedTstExe
+tstCFGMHardened_NAME = tstCFGM
+tstCFGMHardened_DEFS = PROGRAM_NAME_STR=\"tstCFGM\"
+tstCFGMHardened_SOURCES = ../../HostDrivers/Support/SUPR3HardenedMainTemplateTestcase.cpp
+tstCFGM_TEMPLATE = VBoxR3HardenedTstDll
+else
+tstCFGM_TEMPLATE = VBOXR3TSTEXE
+endif
+tstCFGM_DEFS = $(VMM_COMMON_DEFS)
+tstCFGM_SOURCES = tstCFGM.cpp
+tstCFGM_LIBS = $(LIB_VMM) $(LIB_REM) $(LIB_RUNTIME)
+
+#
+# Comparing some compression algorithms considered for SSM usage.
+#
+tstCompressionBenchmark_TEMPLATE = VBOXR3TSTEXE
+tstCompressionBenchmark_SOURCES = tstCompressionBenchmark.cpp
+
+#
+# Two testcases for checking the ring-3 "long jump" code.
+#
+tstVMMR0CallHost-1_TEMPLATE = VBOXR3TSTEXE
+tstVMMR0CallHost-1_DEFS = VMM_R0_NO_SWITCH_STACK
+tstVMMR0CallHost-1_INCS = $(VBOX_PATH_VMM_SRC)/include
+tstVMMR0CallHost-1_SOURCES = \
+ tstVMMR0CallHost-1.cpp
+tstVMMR0CallHost-1_SOURCES.amd64 = \
+ $(VBOX_PATH_VMM_SRC)/VMMR0/VMMR0JmpA-amd64.asm
+tstVMMR0CallHost-1_SOURCES.x86 = \
+ $(VBOX_PATH_VMM_SRC)/VMMR0/VMMR0JmpA-x86.asm
+
+tstVMMR0CallHost-2_EXTENDS = tstVMMR0CallHost-1
+tstVMMR0CallHost-2_DEFS = VMM_R0_SWITCH_STACK
+
+#
+# For testing the VM request queue code.
+#
+if defined(VBOX_WITH_HARDENING) && "$(KBUILD_TARGET)" == "win"
+tstVMREQHardened_TEMPLATE = VBOXR3HARDENEDEXE
+tstVMREQHardened_NAME = tstVMREQ
+tstVMREQHardened_DEFS = PROGRAM_NAME_STR=\"tstVMREQ\"
+tstVMREQHardened_SOURCES = ../../HostDrivers/Support/SUPR3HardenedMainTemplate.cpp
+tstVMREQ_TEMPLATE = VBOXR3
+else
+tstVMREQ_TEMPLATE = VBOXR3EXE
+endif
+tstVMREQ_DEFS = $(VMM_COMMON_DEFS)
+tstVMREQ_SOURCES = tstVMREQ.cpp
+tstVMREQ_LIBS = $(LIB_VMM) $(LIB_REM) $(LIB_RUNTIME)
+
+#
+# Tool for reanimating things like OS/2 dumps.
+#
+if defined(VBOX_WITH_HARDENING) && "$(KBUILD_TARGET)" == "win"
+tstAnimateHardened_TEMPLATE = VBOXR3HARDENEDEXE
+tstAnimateHardened_NAME = tstAnimate
+tstAnimateHardened_DEFS = PROGRAM_NAME_STR=\"tstAnimate\"
+tstAnimateHardened_SOURCES = ../../HostDrivers/Support/SUPR3HardenedMainTemplate.cpp
+tstAnimate_TEMPLATE = VBOXR3
+else
+tstAnimate_TEMPLATE = VBOXR3EXE
+endif
+tstAnimate_DEFS = $(VMM_COMMON_DEFS)
+tstAnimate_SOURCES = tstAnimate.cpp
+tstAnimate_LIBS = $(LIB_VMM) $(LIB_REM) $(LIB_RUNTIME)
+
+tstX86-1_TEMPLATE = VBOXR3TSTEXE
+tstX86-1_SOURCES = tstX86-1.cpp tstX86-1A.asm
+tstX86-1_LIBS = $(LIB_RUNTIME)
+tstX86-1_LDFLAGS.linux = $(VBOX_GCC_no-pie)
+
+tstX86-FpuSaveRestore_TEMPLATE = VBOXR3TSTEXE
+tstX86-FpuSaveRestore_SOURCES = tstX86-FpuSaveRestore.cpp tstX86-FpuSaveRestoreA.asm
+tstX86-FpuSaveRestore_LIBS = $(LIB_RUNTIME)
+
+ifdef VBOX_WITH_RAW_MODE
+
+ #
+ # Raw-mode VMM testcase.
+ #
+ if defined(VBOX_WITH_HARDENING) && "$(KBUILD_TARGET)" == "win"
+tstVMMHardened_TEMPLATE = VBOXR3HARDENEDEXE
+tstVMMHardened_NAME = tstVMM
+tstVMMHardened_DEFS = PROGRAM_NAME_STR=\"tstVMM\"
+tstVMMHardened_SOURCES = ../../HostDrivers/Support/SUPR3HardenedMainTemplate.cpp
+tstVMM_TEMPLATE = VBOXR3
+ else
+tstVMM_TEMPLATE = VBOXR3EXE
+ endif
+tstVMM_SOURCES = tstVMM.cpp
+tstVMM_LIBS = $(LIB_VMM) $(LIB_REM) $(LIB_RUNTIME)
+
+#
+# HM VMM testcase.
+#
+tstVMM-HM_TEMPLATE = VBOXR3EXE
+tstVMM-HM_SOURCES = tstVMM-HM.cpp
+tstVMM-HM_LIBS = $(LIB_VMM) $(LIB_REM) $(LIB_RUNTIME)
+
+#
+# VMM host process fork test case (memory ++).
+#
+tstVMMFork_TEMPLATE = VBOXR3EXE
+tstVMMFork_SOURCES = tstVMMFork.cpp
+tstVMMFork_LIBS = $(LIB_VMM) $(LIB_REM) $(LIB_RUNTIME)
+
+#
+# Raw-mode micro benchmark.
+#
+ if defined(VBOX_WITH_HARDENING) && "$(KBUILD_TARGET)" == "win"
+tstMicroHardened_TEMPLATE = VBOXR3HARDENEDEXE
+tstMicroHardened_NAME = tstMicro
+tstMicroHardened_DEFS = PROGRAM_NAME_STR=\"tstMicro\"
+tstMicroHardened_SOURCES = ../../HostDrivers/Support/SUPR3HardenedMainTemplate.cpp
+tstMicro_TEMPLATE = VBOXR3
+ else
+tstMicro_TEMPLATE = VBOXR3EXE
+ endif
+tstMicro_SOURCES = tstMicro.cpp
+tstMicro_LIBS = $(LIB_VMM) $(LIB_REM) $(LIB_RUNTIME)
+tstMicro_DEFS = $(if $(VBOX_WITH_RAW_MODE),VBOX_WITH_RAW_MODE,)
+
+tstMicroRC_TEMPLATE = VBoxRc
+tstMicroRC_SOURCES = tstMicroRC.cpp tstMicroRCA.asm
+tstMicroRC_DEFS = $(if $(VBOX_WITH_RAW_MODE),VBOX_WITH_RAW_MODE,)
+tstMicroRC_INCS = $(VBOX_PATH_VMM_SRC)/testcase
+ ifeq ($(VBOX_LDR_FMT32),pe)
+tstMicroRC_LDFLAGS = -Entry:tstMicroRC
+ endif
+tstMicroRC_SYSSUFF = .gc
+tstMicroRC_LIBS = \
+ $(PATH_STAGE_LIB)/DisasmRC$(VBOX_SUFF_LIB) \
+ $(PATH_STAGE_LIB)/RuntimeRC$(VBOX_SUFF_LIB)
+ ifeq ($(filter-out pe lx,$(VBOX_LDR_FMT32)),)
+tstMicroRC_LIBS += \
+ $(PATH_STAGE_LIB)/VMMRCBuiltin$(VBOX_SUFF_LIB) \
+ $(LIB_VMMRC)
+ endif
+tstMicroRC_SOURCES.win = tstMicroRC.def
+
+endif # VBOX_WITH_RAW_MODE
+
+
+if !defined(VBOX_ONLY_EXTPACKS_USE_IMPLIBS)
+#
+# Special NEM host testcase.
+#
+ if ("$(KBUILD_TARGET_ARCH).$(KBUILD_TARGET_ARCH)" == "darwin.amd64" && (defined(VBOX_WITH_NATIVE_NEM) || "$(USERNAME)" == "bird")) \
+ || ("$(KBUILD_TARGET_ARCH).$(KBUILD_TARGET_ARCH)" == "linux.amd64" && (defined(VBOX_WITH_NATIVE_NEM) || "$(USERNAME)" == "bird")) \
+ || ("$(KBUILD_TARGET_ARCH).$(KBUILD_TARGET_ARCH)" == "win.amd64" && defined(VBOX_WITH_NATIVE_NEM))
+PROGRAMS += NemRawBench-1
+NemRawBench-1_TEMPLATE = VBoxR3Static
+NemRawBench-1_SOURCES = NemRawBench-1.cpp
+NemRawBench-1_BLD_TYPE = release
+NemRawBench-1_INCS.win = \
+ $(KBUILD_DEVTOOLS)/win.x86/sdk/v10.0.17134.0/include/10.0.17134.0/um \
+ $(KBUILD_DEVTOOLS)/win.x86/sdk/v10.0.17134.0/include/10.0.17134.0/shared
+NemRawBench-1_CXXFLAGS.darwin = \
+ -F/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform//Developer/SDKs/MacOSX10.13.sdk/System/Library/Frameworks
+#NemRawBench-1_LDFLAGS.darwin = \
+# -F/System/Library/Frameworks \
+# -framework Hypervisor
+NemRawBench-1_LDFLAGS.darwin = \
+ /System/Library/Frameworks/Hypervisor.framework/Hypervisor
+ endif
+endif
+
+
+ifdef VBOX_WITH_PDM_ASYNC_COMPLETION
+#
+# PDM asynchronous completion test.
+#
+ if defined(VBOX_WITH_HARDENING) && "$(KBUILD_TARGET)" == "win"
+tstPDMAsyncCompletionHardened_TEMPLATE = VBOXR3HARDENEDEXE
+tstPDMAsyncCompletionHardened_NAME = tstPDMAsyncCompletion
+tstPDMAsyncCompletionHardened_DEFS = PROGRAM_NAME_STR=\"tstPDMAsyncCompletion\"
+tstPDMAsyncCompletionHardened_SOURCES = ../../HostDrivers/Support/SUPR3HardenedMainTemplate.cpp
+tstPDMAsyncCompletion_TEMPLATE = VBOXR3
+ else
+tstPDMAsyncCompletion_TEMPLATE = VBOXR3EXE
+ endif
+tstPDMAsyncCompletion_DEFS = $(VMM_COMMON_DEFS)
+tstPDMAsyncCompletion_INCS = $(VBOX_PATH_VMM_SRC)/include
+tstPDMAsyncCompletion_SOURCES = tstPDMAsyncCompletion.cpp
+tstPDMAsyncCompletion_LIBS = $(LIB_VMM) $(LIB_REM) $(LIB_RUNTIME)
+
+#
+# PDM asynchronous completion stress test.
+#
+ if defined(VBOX_WITH_HARDENING) && "$(KBUILD_TARGET)" == "win"
+tstPDMAsyncCompletionStressHardened_TEMPLATE = VBOXR3HARDENEDEXE
+tstPDMAsyncCompletionStressHardened_NAME = tstPDMAsyncCompletionStress
+tstPDMAsyncCompletionStressHardened_DEFS = PROGRAM_NAME_STR=\"tstPDMAsyncCompletionStress\"
+tstPDMAsyncCompletionStressHardened_SOURCES = ../../HostDrivers/Support/SUPR3HardenedMainTemplate.cpp
+tstPDMAsyncCompletionStress_TEMPLATE = VBOXR3
+ else
+tstPDMAsyncCompletionStress_TEMPLATE = VBOXR3EXE
+ endif
+tstPDMAsyncCompletionStress_DEFS = $(VMM_COMMON_DEFS)
+tstPDMAsyncCompletionStress_INCS = $(VBOX_PATH_VMM_SRC)/include
+tstPDMAsyncCompletionStress_SOURCES = tstPDMAsyncCompletionStress.cpp
+tstPDMAsyncCompletionStress_LIBS = $(LIB_VMM) $(LIB_REM) $(LIB_RUNTIME)
+endif
+
+ifndef VBOX_ONLY_EXTPACKS
+PROGRAMS += tstSSM-2
+tstSSM-2_TEMPLATE = VBOXR3TSTEXE
+tstSSM-2_DEFS = IN_VMM_STATIC
+tstSSM-2_SOURCES = tstSSM-2.cpp
+tstSSM-2_LIBS = $(PATH_STAGE_LIB)/SSMStandalone$(VBOX_SUFF_LIB)
+endif
+
+#
+# Generate VM structure tests.
+#
+if !defined(VBOX_ONLY_EXTPACKS) \
+ && ( defined(VBOX_WITH_DTRACE_R3) \
+ || defined(VBOX_WITH_DTRACE_R0) \
+ || defined(VBOX_WITH_DTRACE_RC))
+tstVMStructDTrace_TEMPLATE = VBOXR3AUTOTST
+tstVMStructDTrace_DEFS = VBOX_IN_VMM IN_VMM_R3 IN_DIS $(VMM_COMMON_DEFS)
+ ifdef VBOX_WITH_RAW_MODE
+tstVMStructDTrace_DEFS += VBOX_WITH_RAW_MODE
+ endif
+tstVMStructDTrace_INCS = \
+ $(VBOX_PATH_VMM_SRC)/include \
+ $(VBOX_PATH_VMM_SRC)/PATM \
+ $(VBOX_VMM_TESTCASE_OUT_DIR)
+tstVMStructDTrace_SOURCES = tstVMStructDTrace.cpp
+ ifdef VBOX_WITH_R0_LOGGING
+tstVMStructDTrace_DEFS += VBOX_WITH_R0_LOGGING
+ endif
+ ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+tstVMStructDTrace_DEFS += VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+ endif
+
+
+VMMLibDTraceStructTest_INST = $(VBOX_INST_DTRACE_TST)$(KBUILD_TARGET_ARCH)/
+VMMLibDTraceStructTest_SOURCES = \
+ $(tstVMStructDTrace_0_OUTDIR)/vbox-vm-struct-test.d
+VMMLibDTraceStructTest_CLEAN = \
+ $(tstVMStructDTrace_0_OUTDIR)/vbox-vm-struct-test.d
+
+$$(tstVMStructDTrace_0_OUTDIR)/vbox-vm-struct-test.d: \
+ $$(tstVMStructDTrace_1_STAGE_TARGET) | $$(dir $$@)
+ $(QUIET)$(RM) -f $@
+ $< > $@
+
+endif
+
+
+include $(FILE_KBUILD_SUB_FOOTER)
+
+
+#
+# Some handcrafted support targets for tstAsmStructs.
+#
+MY_ASA_ASM_STUFF = \
+ $(addprefix -D, \
+ $(DEFS) \
+ $(DEFS.$(KBUILD_TYPE)) \
+ $(DEFS.$(KBUILD_TARGET)) \
+ IN_RING3 $(ARCH_BITS_DEFS) \
+ $(DEFS.$(KBUILD_TARGET_ARCH)) \
+ $(DEFS.$(KBUILD_TARGET).$(KBUILD_TARGET_ARCH)) \
+ $(if $(VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI),VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI,) \
+ $(VMM_COMMON_DEFS) \
+ ) \
+ -f $(if $(eq $(KBUILD_TARGET),darwin),macho,elf) \
+ $(foreach inc,$(INCS) $(VBOX_PATH_VMM_SRC)/testcase $(VBOX_PATH_VMM_SRC)/include $(VBOX_VMM_TESTCASE_OUT_DIR)\
+ ,-I$(inc)/)
+
+# 1a. make a header file which makes all the structures+members globals.
+$(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructsAsm.mac: \
+ $(VBOX_PATH_VMM_SRC)/testcase/tstAsmStructsAsm.asm \
+ $(VBOX_PATH_VMM_SRC)/testcase/tstAsmStructsAsm-lst.sed \
+ $(DEPTH)/include/iprt/asmdefs.mac \
+ $(DEPTH)/include/VBox/vmm/cpum.mac \
+ $(DEPTH)/include/VBox/vmm/vm.mac \
+ $(DEPTH)/include/VBox/sup.mac \
+ $(DEPTH)/include/iprt/x86.mac \
+ $(VBOX_PATH_VMM_SRC)/include/CPUMInternal.mac \
+ $(VBOX_PATH_VMM_SRC)/include/HMInternal.mac \
+ $(VBOX_PATH_VMM_SRC)/include/VMMInternal.mac \
+ $(VBOX_PATH_VMM_SRC)/testcase/Makefile.kmk \
+ $(PATH_ROOT)/Config.kmk $(LOCALCFG) $(AUTOCFG) \
+ | $$(dir $$@)
+ $(call MSG_GENERATE,tstVMStructSize,$@,$<)
+ifndef DONT_USE_YASM
+ $(QUIET)$(TOOL_YASM_AS) $(MY_ASA_ASM_STUFF) -o $@.o -l $@.lst $<
+ $(SED) -f $(VBOX_PATH_VMM_SRC)/testcase/tstAsmStructsAsm-lst.sed --output $@ $@.lst
+else
+ $(QUIET)$(TOOL_NASM_AS) -g $(MY_ASA_ASM_STUFF) -o $@.o -l $@.lst $<
+ $(VBOX_NM) $@.o | $(SED) \
+ -e '/[0-9a-fA-F][0-9a-fA-F]* [^a] /d' \
+ -e 's/[0-9a-fA-F][0-9a-fA-F]* a \([^ ]*\)/global \1/' \
+ > $@
+endif
+
+# 1b. make an elf/macho object containing the offsets.
+includedep $(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructsAsm.o.dep
+$(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructsAsm.o: \
+ $(VBOX_PATH_VMM_SRC)/testcase/tstAsmStructsAsm.asm \
+ $(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructsAsm.mac \
+ $(DEPTH)/include/iprt/asmdefs.mac \
+ $(DEPTH)/include/VBox/vmm/cpum.mac \
+ $(DEPTH)/include/VBox/vmm/hm_vmx.mac \
+ $(DEPTH)/include/VBox/vmm/stam.mac \
+ $(DEPTH)/include/VBox/vmm/vm.mac \
+ $(DEPTH)/include/VBox/sup.mac \
+ $(DEPTH)/include/iprt/x86.mac \
+ $(VBOX_PATH_VMM_SRC)/include/CPUMInternal.mac \
+ $(VBOX_PATH_VMM_SRC)/include/HMInternal.mac \
+ $(VBOX_PATH_VMM_SRC)/include/VMMInternal.mac \
+ $(VBOX_PATH_VMM_SRC)/testcase/Makefile.kmk \
+ $$(if $$(eq $$(tstAsmStructsAsmDep_STUFF),$$(MY_ASA_ASM_STUFF)),,FORCE) \
+ | $$(dir $$@)
+ $(call MSG_COMPILE,tstAsmStructsasm,$<,$@,AS)
+ifndef DONT_USE_YASM
+ $(QUIET)$(TOOL_YASM_AS) $(MY_ASA_ASM_STUFF) -DDO_GLOBALS -o $@ $<
+else
+ $(QUIET)$(TOOL_NASM_AS) $(MY_ASA_ASM_STUFF) -DDO_GLOBALS -o $@ $<
+endif
+ %$(QUIET2)$(RM) -f -- $(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructsAsm.o.dep
+ %$(QUIET2)$(APPEND) '$(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructsAsm.o.dep' 'tstAsmStructsAsmDep_STUFF=$(MY_ASA_ASM_STUFF)'
+
+# 2. use nm and sed to transform this into the header we want.
+$(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructsHC.h: $(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructsAsm.o
+ $(call MSG_GENERATE,tstVMStructSize,$@,$<)
+ $(QUIET)$(RM) -f $@ $@.dump $@.tmp
+ $(QUIET)$(REDIRECT) -wo $@.dump -- $(VBOX_NM) $<
+ $(QUIET)$(SED) \
+ -e '/STAMPROFILEADV/d' \
+ \
+ -e '/^\(0x\)\{0,1\}00[0-9a-fA-F]* [aAnN] [^_.]*\./!d' \
+ -e 's/^\(0x\)\{0,1\}\(00[0-9a-fA-F]*\) [aAnN] \([^.]*\)\.\(.*$$\)/ CHECK_OFF(\3, 0x0\2, \4);/' \
+ --output $@.tmp $@.dump
+ $(QUIET)$(SED) \
+ -e '/VM_size$$/d' \
+ -e '/VMCPU_size$$/d' \
+ -e '/VMMCPU_size$$/d' \
+ -e '/SUPDRVTRACERUSRCTX32_size$$/d' \
+ -e '/HMCPU_size$$/d' \
+ \
+ -e '/^\(0x\)\{0,1\}00[0-9a-fA-F]* [aAnN] [^_.]*_size$$/!d' \
+ -e 's/^\(0x\)\{0,1\}\(00[0-9a-fA-F]*\) [aAnN] \([^_.]*\)_size/ CHECK_SIZE(\3, 0x0\2);/' \
+ --append $@.tmp $@.dump
+ $(QUIET)$(MV) -f $@.tmp $@
+ $(QUIET)$(RM) -f $@.dump
+
+# 3. run it.
+$(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructs.run: \
+ $$(tstAsmStructs_1_STAGE_TARGET) \
+ $(if-expr defined(VBOX_WITH_RAW_MODE),$$(tstAsmStructsRC_1_STAGE_TARGET),)
+ $(QUIET)$(RM) -f $@
+ $(tstAsmStructs_1_STAGE_TARGET)
+ifdef VBOX_WITH_RAW_MODE
+ $(tstAsmStructsRC_1_STAGE_TARGET)
+endif
+ $(QUIET)$(APPEND) "$@" "done"
+
+
+
+#
+# Run rule for tstVMStructSize.
+#
+
+ifdef VBOX_WITH_RAW_MODE
+# 1. Manually dump selected structures and members.
+$(VBOX_VMM_TESTCASE_OUT_DIR)/tstVMStructRC.h: $$(tstVMStructRC_1_STAGE_TARGET) | $$(dir $$@)
+ $(call MSG_GENERATE,tstVMStructSize,$@)
+ $(QUIET)$(REDIRECT) -wo $@ -- $<
+endif # VBOX_WITH_RAW_MODE
+
+# 2. run it.
+$(VBOX_VMM_TESTCASE_OUT_DIR)/tstVMStructSize.run: $$(tstVMStructSize_1_STAGE_TARGET) | $$(dir $$@)
+ $(QUIET)$(RM) -f $@
+ $<
+ $(QUIET)$(APPEND) "$@" "done"
+
+# alias for the two struct tests.
+run-struct-tests: $(VBOX_VMM_TESTCASE_OUT_DIR)/tstAsmStructs.run $(VBOX_VMM_TESTCASE_OUT_DIR)/tstVMStructSize.run
+
diff --git a/src/VBox/VMM/testcase/NemRawBench-1.cpp b/src/VBox/VMM/testcase/NemRawBench-1.cpp
new file mode 100644
index 00000000..944b372e
--- /dev/null
+++ b/src/VBox/VMM/testcase/NemRawBench-1.cpp
@@ -0,0 +1,1346 @@
+/* $Id: NemRawBench-1.cpp $ */
+/** @file
+ * NEM Benchmark.
+ */
+
+/*
+ * Copyright (C) 2018-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#ifdef RT_OS_WINDOWS
+# include <iprt/win/windows.h>
+# include <WinHvPlatform.h>
+# if !defined(_INTPTR) && defined(_M_AMD64) /* avoid pedantic stdint.h warnings */
+# define _INTPTR 2
+# endif
+
+#elif defined(RT_OS_LINUX)
+# include <linux/kvm.h>
+# include <errno.h>
+# include <sys/fcntl.h>
+# include <sys/ioctl.h>
+# include <sys/mman.h>
+# include <unistd.h>
+# include <time.h>
+
+#elif defined(RT_OS_DARWIN)
+# include <Availability.h>
+# if 1 /* header mix hack */
+# undef __OSX_AVAILABLE_STARTING
+# define __OSX_AVAILABLE_STARTING(_osx, _ios)
+# endif
+# include <Hypervisor/hv.h>
+# include <Hypervisor/hv_arch_x86.h>
+# include <Hypervisor/hv_arch_vmx.h>
+# include <Hypervisor/hv_vmx.h>
+# include <mach/mach_time.h>
+# include <mach/kern_return.h>
+# include <sys/time.h>
+# include <time.h>
+# include <sys/mman.h>
+# include <errno.h>
+
+#else
+# error "port me"
+#endif
+
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+/** The base mapping address of the g_pbMem. */
+#define MY_MEM_BASE 0x1000
+/** No-op MMIO access address. */
+#define MY_NOP_MMIO 0x0808
+/** The RIP which the testcode starts. */
+#define MY_TEST_RIP 0x2000
+
+/** The test termination port number. */
+#define MY_TERM_PORT 0x01
+/** The no-op test port number. */
+#define MY_NOP_PORT 0x7f
+
+/** @name MY_TEST_F_XXX - test selection flags.
+ * Note: ignored by the runRealModeTest() implementations in this file
+ * ((void)fTest); presumably consumed by the caller -- TODO confirm.
+ * @{ */
+#define MY_TEST_F_NOP_IO (1U<<0)
+#define MY_TEST_F_CPUID (1U<<1)
+#define MY_TEST_F_NOP_MMIO (1U<<2)
+/** @} */
+
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** Chunk of memory mapped at address 0x1000 (MY_MEM_BASE). */
+static unsigned char *g_pbMem;
+/** Amount of RAM at address 0x1000 (MY_MEM_BASE). */
+static size_t g_cbMem;
+#ifdef RT_OS_WINDOWS
+/** The Hyper-V partition handle (the "VM"). */
+static WHV_PARTITION_HANDLE g_hPartition = NULL;
+
+/** @name APIs imported from WinHvPlatform.dll
+ * @{ */
+static decltype(WHvCreatePartition) *g_pfnWHvCreatePartition;
+static decltype(WHvSetupPartition) *g_pfnWHvSetupPartition;
+static decltype(WHvGetPartitionProperty) *g_pfnWHvGetPartitionProperty;
+static decltype(WHvSetPartitionProperty) *g_pfnWHvSetPartitionProperty;
+static decltype(WHvMapGpaRange) *g_pfnWHvMapGpaRange;
+static decltype(WHvCreateVirtualProcessor) *g_pfnWHvCreateVirtualProcessor;
+static decltype(WHvRunVirtualProcessor) *g_pfnWHvRunVirtualProcessor;
+static decltype(WHvGetVirtualProcessorRegisters) *g_pfnWHvGetVirtualProcessorRegisters;
+static decltype(WHvSetVirtualProcessorRegisters) *g_pfnWHvSetVirtualProcessorRegisters;
+/** @} */
+/** ntdll time API resolved at runtime; returns the system time in 100ns
+ * units (getNanoTS() multiplies the value by 100 to get nanoseconds). */
+static uint64_t (WINAPI *g_pfnRtlGetSystemTimePrecise)(void);
+
+#elif defined(RT_OS_LINUX)
+/** The VM handle. */
+static int g_fdVm;
+/** The VCPU handle. */
+static int g_fdVCpu;
+/** The kvm_run structure for the VCpu. */
+static struct kvm_run *g_pVCpuRun;
+/** The size of the g_pVCpuRun mapping. */
+static ssize_t g_cbVCpuRun;
+
+#elif defined(RT_OS_DARWIN)
+/** The VCpu ID. */
+static hv_vcpuid_t g_idVCpu;
+#endif
+
+
+/**
+ * Prints a printf-style message to stderr with an "error: " prefix.
+ *
+ * @returns 1, so callers can use "return error(...)" as their failure path.
+ * @param   pszFormat   The printf-style format string.
+ * @param   ...         Format arguments.
+ */
+static int error(const char *pszFormat, ...)
+{
+    fprintf(stderr, "error: ");
+    va_list va;
+    va_start(va, pszFormat);
+    vfprintf(stderr, pszFormat, va);
+    va_end(va);
+    return 1;
+}
+
+
+/**
+ * Gets a timestamp in nanoseconds for benchmarking purposes.
+ *
+ * Windows uses RtlGetSystemTimePrecise (100ns units scaled up), Linux uses
+ * clock_gettime(CLOCK_MONOTONIC), Darwin scales mach_absolute_time by the
+ * timebase factor, and other hosts fall back to gettimeofday.
+ *
+ * @returns Current timestamp, in nanoseconds.
+ */
+static uint64_t getNanoTS(void)
+{
+#ifdef RT_OS_WINDOWS
+    return g_pfnRtlGetSystemTimePrecise() * 100;
+
+#elif defined(RT_OS_LINUX)
+    struct timespec ts;
+    clock_gettime(CLOCK_MONOTONIC, &ts);
+    return (uint64_t)ts.tv_sec * UINT64_C(1000000000) + ts.tv_nsec;
+
+#elif defined(RT_OS_DARWIN)
+    /* Timebase factor is resolved once and cached in statics. */
+    static struct mach_timebase_info s_Info = { 0, 0 };
+    static double s_rdFactor = 0.0;
+    /* Lazy init. */
+    if (s_Info.denom != 0)
+    { /* likely */ }
+    else if (mach_timebase_info(&s_Info) == KERN_SUCCESS)
+        s_rdFactor = (double)s_Info.numer / (double)s_Info.denom;
+    else
+    {
+        error("mach_timebase_info(&Info) failed\n");
+        exit(1);
+    }
+    if (s_Info.denom == 1 && s_Info.numer == 1) /* special case: absolute time is in nanoseconds */
+        return mach_absolute_time();
+    return mach_absolute_time() * s_rdFactor;
+#else
+    struct timeval tv;
+    gettimeofday(&tv, NULL);
+    return (uint64_t)tv.tv_sec * UINT64_C(1000000000)
+         + (tv.tv_usec * UINT32_C(1000));
+#endif
+}
+
+
+/**
+ * Formats an unsigned number with spaces as thousand separators, right
+ * justified to a minimum field width.
+ *
+ * @returns pszDst.
+ * @param   uNum        The number to format.
+ * @param   cchWidth    The minimum field width (padded with leading blanks).
+ * @param   pszDst      The output buffer.
+ * @param   cbDst       The size of the output buffer.
+ */
+char *formatNum(uint64_t uNum, unsigned cchWidth, char *pszDst, size_t cbDst)
+{
+    /* Room for 20 digits plus up to 22 bytes of separators/terminator. */
+    char szTmp[64 + 22];
+#ifdef _MSC_VER
+    size_t cchTmp = _snprintf(szTmp, sizeof(szTmp) - 22, "%I64u", uNum);
+#else
+    size_t cchTmp = snprintf(szTmp, sizeof(szTmp) - 22, "%llu", (unsigned long long)uNum);
+#endif
+    /* Insert a blank before every third digit, working from the end. */
+    size_t cSeps = (cchTmp - 1) / 3;
+    size_t const cchTotal = cchTmp + cSeps;
+    if (cSeps)
+    {
+        szTmp[cchTotal] = '\0';
+        for (size_t iSrc = cchTmp, iDst = cchTotal; cSeps > 0; cSeps--)
+        {
+            szTmp[--iDst] = szTmp[--iSrc];
+            szTmp[--iDst] = szTmp[--iSrc];
+            szTmp[--iDst] = szTmp[--iSrc];
+            szTmp[--iDst] = ' ';
+        }
+    }
+
+    /* Right justify, then copy out as much as the buffer can hold. */
+    size_t offDst = 0;
+    while (cchWidth-- > cchTotal && offDst < cbDst)
+        pszDst[offDst++] = ' ';
+    size_t offSrc = 0;
+    while (offSrc < cchTotal && offDst < cbDst)
+        pszDst[offDst++] = szTmp[offSrc++];
+    /* Fix: the original unconditionally wrote the terminator at
+       pszDst[offDst], which is pszDst[cbDst] (one byte past the end) when
+       the formatted output exactly fills the buffer. */
+    if (cbDst)
+        pszDst[offDst < cbDst ? offDst : cbDst - 1] = '\0';
+    return pszDst;
+}
+
+
+/**
+ * Prints the result line for one benchmark run.
+ *
+ * @returns 0 (success exit code convention).
+ * @param   pszInstruction  The name of the instruction that was benchmarked.
+ * @param   cInstructions   Number of instructions executed.
+ * @param   nsElapsed       Elapsed time in nanoseconds.
+ * @param   cExits          Number of VM-exits serviced during the run.
+ */
+int reportResult(const char *pszInstruction, uint32_t cInstructions, uint64_t nsElapsed, uint32_t cExits)
+{
+    /* Guard against division by zero on a (theoretical) zero elapsed time. */
+    uint64_t const cInstrPerSec = nsElapsed ? (uint64_t)cInstructions * 1000000000 / nsElapsed : 0;
+    char szTmp1[64], szTmp2[64], szTmp3[64];
+    printf("%s %7s instructions per second (%s exits in %s ns)\n",
+           formatNum(cInstrPerSec, 10, szTmp1, sizeof(szTmp1)), pszInstruction,
+           formatNum(cExits, 0, szTmp2, sizeof(szTmp2)),
+           formatNum(nsElapsed, 0, szTmp3, sizeof(szTmp3)));
+    return 0;
+}
+
+
+
+#ifdef RT_OS_WINDOWS
+
+/*
+ * Windows - Hyper-V Platform API.
+ */
+
+/**
+ * Creates the Hyper-V partition ("VM") with one virtual processor and maps
+ * g_cbMem bytes of RAM at MY_MEM_BASE, after dynamically resolving the
+ * WinHvPlatform.dll and ntdll.dll APIs this benchmark needs.
+ *
+ * @returns 0 on success, 1 on failure (error message already printed).
+ */
+static int createVM(void)
+{
+    /*
+     * Resolve APIs.
+     */
+    HMODULE hmod = LoadLibraryW(L"WinHvPlatform.dll");
+    if (hmod == NULL)
+        return error("Error loading WinHvPlatform.dll: %u\n", GetLastError());
+    static struct { const char *pszFunction; FARPROC *ppfn; } const s_aImports[] =
+    {
+# define IMPORT_ENTRY(a_Name) { #a_Name, (FARPROC *)&g_pfn##a_Name }
+        IMPORT_ENTRY(WHvCreatePartition),
+        IMPORT_ENTRY(WHvSetupPartition),
+        IMPORT_ENTRY(WHvGetPartitionProperty),
+        IMPORT_ENTRY(WHvSetPartitionProperty),
+        IMPORT_ENTRY(WHvMapGpaRange),
+        IMPORT_ENTRY(WHvCreateVirtualProcessor),
+        IMPORT_ENTRY(WHvRunVirtualProcessor),
+        IMPORT_ENTRY(WHvGetVirtualProcessorRegisters),
+        IMPORT_ENTRY(WHvSetVirtualProcessorRegisters),
+# undef IMPORT_ENTRY
+    };
+    FARPROC pfn;
+    for (size_t i = 0; i < sizeof(s_aImports) / sizeof(s_aImports[0]); i++)
+    {
+        *s_aImports[i].ppfn = pfn = GetProcAddress(hmod, s_aImports[i].pszFunction);
+        if (!pfn)
+            return error("Error resolving WinHvPlatform.dll!%s: %u\n", s_aImports[i].pszFunction, GetLastError());
+    }
+    /* Let the rest of the file use the API names directly (they expand to
+       the resolved function pointers). */
+# ifndef IN_SLICKEDIT
+#  define WHvCreatePartition                 g_pfnWHvCreatePartition
+#  define WHvSetupPartition                  g_pfnWHvSetupPartition
+#  define WHvGetPartitionProperty            g_pfnWHvGetPartitionProperty
+#  define WHvSetPartitionProperty            g_pfnWHvSetPartitionProperty
+#  define WHvMapGpaRange                     g_pfnWHvMapGpaRange
+#  define WHvCreateVirtualProcessor          g_pfnWHvCreateVirtualProcessor
+#  define WHvRunVirtualProcessor             g_pfnWHvRunVirtualProcessor
+#  define WHvGetVirtualProcessorRegisters    g_pfnWHvGetVirtualProcessorRegisters
+#  define WHvSetVirtualProcessorRegisters    g_pfnWHvSetVirtualProcessorRegisters
+# endif
+    /* Need a precise time function. */
+    *(FARPROC *)&g_pfnRtlGetSystemTimePrecise = pfn = GetProcAddress(GetModuleHandleW(L"ntdll.dll"), "RtlGetSystemTimePrecise");
+    if (pfn == NULL)
+        return error("Error resolving ntdll.dll!RtlGetSystemTimePrecise: %u\n", GetLastError());
+
+    /*
+     * Create the partition with 1 CPU and the specified amount of memory.
+     */
+    WHV_PARTITION_HANDLE hPartition;
+    HRESULT hrc = WHvCreatePartition(&hPartition);
+    if (!SUCCEEDED(hrc))
+        return error("WHvCreatePartition failed: %#x\n", hrc);
+    g_hPartition = hPartition;
+
+    WHV_PARTITION_PROPERTY Property;
+    memset(&Property, 0, sizeof(Property));
+    Property.ProcessorCount = 1;
+    hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeProcessorCount, &Property, sizeof(Property));
+    if (!SUCCEEDED(hrc))
+        return error("WHvSetPartitionProperty/WHvPartitionPropertyCodeProcessorCount failed: %#x\n", hrc);
+
+    /* Request VM-exits for CPUID and MSR accesses (needed by the CPUID test). */
+    memset(&Property, 0, sizeof(Property));
+    Property.ExtendedVmExits.X64CpuidExit = 1;
+    Property.ExtendedVmExits.X64MsrExit   = 1;
+    hrc = WHvSetPartitionProperty(hPartition, WHvPartitionPropertyCodeExtendedVmExits, &Property, sizeof(Property));
+    if (!SUCCEEDED(hrc))
+        return error("WHvSetPartitionProperty/WHvPartitionPropertyCodeExtendedVmExits failed: %#x\n", hrc);
+
+    hrc = WHvSetupPartition(hPartition);
+    if (!SUCCEEDED(hrc))
+        return error("WHvSetupPartition failed: %#x\n", hrc);
+
+    hrc = WHvCreateVirtualProcessor(hPartition, 0 /*idVCpu*/, 0 /*fFlags*/);
+    if (!SUCCEEDED(hrc))
+        return error("WHvCreateVirtualProcessor failed: %#x\n", hrc);
+
+    g_pbMem = (unsigned char *)VirtualAlloc(NULL, g_cbMem, MEM_COMMIT, PAGE_READWRITE);
+    if (!g_pbMem)
+        return error("VirtualAlloc failed: %u\n", GetLastError());
+    memset(g_pbMem, 0xcc, g_cbMem); /* fill with int3 so stray execution is noticed */
+
+    hrc = WHvMapGpaRange(hPartition, g_pbMem, MY_MEM_BASE /*GCPhys*/, g_cbMem,
+                         WHvMapGpaRangeFlagRead | WHvMapGpaRangeFlagWrite | WHvMapGpaRangeFlagExecute);
+    if (!SUCCEEDED(hrc))
+        return error("WHvMapGpaRange failed: %#x\n", hrc);
+
+    /* One throwaway run of the VCpu; return code deliberately ignored.
+       NOTE(review): presumably warms up the partition before measuring --
+       TODO confirm. */
+    WHV_RUN_VP_EXIT_CONTEXT ExitInfo;
+    memset(&ExitInfo, 0, sizeof(ExitInfo));
+    WHvRunVirtualProcessor(g_hPartition, 0 /*idCpu*/, &ExitInfo, sizeof(ExitInfo));
+
+    return 0;
+}
+
+
+/**
+ * Prints a runtime error message followed by a dump of the virtual CPU
+ * state fetched via WHvGetVirtualProcessorRegisters.
+ *
+ * @returns 1, so callers can use "return runtimeError(...)".
+ * @param   pszFormat   The printf-style format string.
+ * @param   ...         Format arguments.
+ */
+static int runtimeError(const char *pszFormat, ...)
+{
+    fprintf(stderr, "runtime error: ");
+    va_list va;
+    va_start(va, pszFormat);
+    vfprintf(stderr, pszFormat, va);
+    va_end(va);
+
+    /* uType: 64 = 64-bit GPR, 32 = 32-bit value, 1 = segment register. */
+    static struct { const char *pszName; WHV_REGISTER_NAME enmName; unsigned uType; } const s_aRegs[] =
+    {
+        { "rip",    WHvX64RegisterRip, 64 },
+        { "cs",     WHvX64RegisterCs,   1 },
+        { "rflags", WHvX64RegisterRflags, 32 },
+        { "rax",    WHvX64RegisterRax, 64 },
+        { "rcx",    WHvX64RegisterRcx, 64 },
+        { "rdx",    WHvX64RegisterRdx, 64 },
+        { "rbx",    WHvX64RegisterRbx, 64 },
+        { "rsp",    WHvX64RegisterRsp, 64 },
+        { "ss",     WHvX64RegisterSs,   1 },
+        { "rbp",    WHvX64RegisterRbp, 64 },
+        { "rsi",    WHvX64RegisterRsi, 64 },
+        { "rdi",    WHvX64RegisterRdi, 64 },
+        { "ds",     WHvX64RegisterDs,   1 },
+        { "es",     WHvX64RegisterEs,   1 },
+        { "fs",     WHvX64RegisterFs,   1 },
+        { "gs",     WHvX64RegisterGs,   1 },
+        { "cr0",    WHvX64RegisterCr0, 64 },
+        { "cr2",    WHvX64RegisterCr2, 64 },
+        { "cr3",    WHvX64RegisterCr3, 64 },
+        { "cr4",    WHvX64RegisterCr4, 64 },
+    };
+    for (unsigned i = 0; i < sizeof(s_aRegs) / sizeof(s_aRegs[0]); i++)
+    {
+        WHV_REGISTER_VALUE Value;
+        WHV_REGISTER_NAME  enmName = s_aRegs[i].enmName;
+        HRESULT hrc = WHvGetVirtualProcessorRegisters(g_hPartition, 0 /*idCpu*/, &enmName, 1, &Value);
+        if (SUCCEEDED(hrc))
+        {
+            if (s_aRegs[i].uType == 32)
+                fprintf(stderr, "%8s=%08x\n", s_aRegs[i].pszName, Value.Reg32);
+            else if (s_aRegs[i].uType == 64) /* printed as high'low dwords */
+                fprintf(stderr, "%8s=%08x'%08x\n", s_aRegs[i].pszName, (unsigned)(Value.Reg64 >> 32), Value.Reg32);
+            else if (s_aRegs[i].uType == 1)
+                fprintf(stderr, "%8s=%04x  base=%08x'%08x  limit=%08x attr=%04x\n", s_aRegs[i].pszName,
+                        Value.Segment.Selector, (unsigned)(Value.Segment.Base >> 32), (unsigned)Value.Segment.Base,
+                        Value.Segment.Limit, Value.Segment.Attributes);
+        }
+        else
+            fprintf(stderr, "%8s=<WHvGetVirtualProcessorRegisters failed %#x>\n", s_aRegs[i].pszName, hrc);
+    }
+
+    return 1;
+}
+
+
+/**
+ * Runs the guest code at MY_TEST_RIP in 16-bit real mode, servicing NOP I/O
+ * port, CPUID and NOP MMIO exits until the guest accesses MY_TERM_PORT, then
+ * reports the instructions/second rate.
+ *
+ * @returns 0 on success, 1 on failure.
+ * @param   cInstructions   Number of instructions the test code executes
+ *                          (used for the rate calculation only).
+ * @param   pszInstruction  Name of the instruction being benchmarked (for
+ *                          messages).
+ * @param   fTest           MY_TEST_F_XXX; not used by this implementation.
+ * @param   uEax, uEcx, uEdx, uEbx, uEsp, uEbp, uEsi, uEdi
+ *                          Initial general purpose register values.
+ */
+static int runRealModeTest(unsigned cInstructions, const char *pszInstruction, unsigned fTest,
+                           unsigned uEax, unsigned uEcx, unsigned uEdx, unsigned uEbx,
+                           unsigned uEsp, unsigned uEbp, unsigned uEsi, unsigned uEdi)
+{
+    (void)fTest;
+
+    /*
+     * Initialize the real mode context.
+     */
+# define ADD_REG64(a_enmName, a_uValue) do { \
+        aenmNames[iReg]      = (a_enmName); \
+        aValues[iReg].Reg128.High64 = 0; \
+        aValues[iReg].Reg64  = (a_uValue); \
+        iReg++; \
+    } while (0)
+# define ADD_SEG(a_enmName, a_Base, a_Limit, a_Sel, a_fCode) \
+    do { \
+        aenmNames[iReg]                  = a_enmName; \
+        aValues[iReg].Segment.Base       = (a_Base); \
+        aValues[iReg].Segment.Limit      = (a_Limit); \
+        aValues[iReg].Segment.Selector   = (a_Sel); \
+        aValues[iReg].Segment.Attributes = a_fCode ? 0x9b : 0x93; \
+        iReg++; \
+    } while (0)
+    WHV_REGISTER_NAME  aenmNames[80];
+    WHV_REGISTER_VALUE aValues[80];
+    unsigned iReg = 0;
+    ADD_REG64(WHvX64RegisterRax, uEax);
+    ADD_REG64(WHvX64RegisterRcx, uEcx);
+    ADD_REG64(WHvX64RegisterRdx, uEdx);
+    ADD_REG64(WHvX64RegisterRbx, uEbx);
+    ADD_REG64(WHvX64RegisterRsp, uEsp);
+    ADD_REG64(WHvX64RegisterRbp, uEbp);
+    ADD_REG64(WHvX64RegisterRsi, uEsi);
+    ADD_REG64(WHvX64RegisterRdi, uEdi);
+    ADD_REG64(WHvX64RegisterRip, MY_TEST_RIP);
+    ADD_REG64(WHvX64RegisterRflags, 2);
+    ADD_SEG(WHvX64RegisterEs, 0x00000, 0xffff, 0x0000, 0);
+    ADD_SEG(WHvX64RegisterCs, 0x00000, 0xffff, 0x0000, 1);
+    ADD_SEG(WHvX64RegisterSs, 0x00000, 0xffff, 0x0000, 0);
+    ADD_SEG(WHvX64RegisterDs, 0x00000, 0xffff, 0x0000, 0);
+    ADD_SEG(WHvX64RegisterFs, 0x00000, 0xffff, 0x0000, 0);
+    ADD_SEG(WHvX64RegisterGs, 0x00000, 0xffff, 0x0000, 0);
+    ADD_REG64(WHvX64RegisterCr0, 0x10010 /*WP+ET*/);
+    ADD_REG64(WHvX64RegisterCr2, 0);
+    ADD_REG64(WHvX64RegisterCr3, 0);
+    ADD_REG64(WHvX64RegisterCr4, 0);
+    HRESULT hrc = WHvSetVirtualProcessorRegisters(g_hPartition, 0 /*idCpu*/, aenmNames, iReg, aValues);
+    if (!SUCCEEDED(hrc))
+        return error("WHvSetVirtualProcessorRegisters failed (for %s): %#x\n", pszInstruction, hrc);
+# undef ADD_REG64
+# undef ADD_SEG
+
+    /*
+     * Run the test.  Unlike KVM, WHv leaves RIP advancing over the exiting
+     * instruction to us, hence the WHvSetVirtualProcessorRegisters calls in
+     * each exit branch below.
+     */
+    uint32_t cExits = 0;
+    uint64_t const nsStart = getNanoTS();
+    for (;;)
+    {
+        WHV_RUN_VP_EXIT_CONTEXT ExitInfo;
+        memset(&ExitInfo, 0, sizeof(ExitInfo));
+        hrc = WHvRunVirtualProcessor(g_hPartition, 0 /*idCpu*/, &ExitInfo, sizeof(ExitInfo));
+        if (SUCCEEDED(hrc))
+        {
+            cExits++;
+            if (ExitInfo.ExitReason == WHvRunVpExitReasonX64IoPortAccess)
+            {
+                if (ExitInfo.IoPortAccess.PortNumber == MY_NOP_PORT)
+                { /* likely: nop instruction */ }
+                else if (ExitInfo.IoPortAccess.PortNumber == MY_TERM_PORT)
+                    break;
+                else
+                    return runtimeError("Unexpected I/O port access (for %s): %#x\n", pszInstruction, ExitInfo.IoPortAccess.PortNumber);
+
+                /* Advance. */
+                if (ExitInfo.VpContext.InstructionLength)
+                {
+                    aenmNames[0] = WHvX64RegisterRip;
+                    aValues[0].Reg64 = ExitInfo.VpContext.Rip + ExitInfo.VpContext.InstructionLength;
+                    hrc = WHvSetVirtualProcessorRegisters(g_hPartition, 0 /*idCpu*/, aenmNames, 1, aValues);
+                    if (SUCCEEDED(hrc))
+                    { /* likely */ }
+                    else
+                        return runtimeError("Error advancing RIP (for %s): %#x\n", pszInstruction, hrc);
+                }
+                else
+                    return runtimeError("VpContext.InstructionLength is zero (for %s)\n", pszInstruction);
+            }
+            else if (ExitInfo.ExitReason == WHvRunVpExitReasonX64Cpuid)
+            {
+                /* Advance RIP and set default results. */
+                if (ExitInfo.VpContext.InstructionLength)
+                {
+                    aenmNames[0] = WHvX64RegisterRip;
+                    aValues[0].Reg64 = ExitInfo.VpContext.Rip + ExitInfo.VpContext.InstructionLength;
+                    aenmNames[1] = WHvX64RegisterRax;
+                    aValues[1].Reg64 = ExitInfo.CpuidAccess.DefaultResultRax;
+                    aenmNames[2] = WHvX64RegisterRcx;
+                    aValues[2].Reg64 = ExitInfo.CpuidAccess.DefaultResultRcx;
+                    aenmNames[3] = WHvX64RegisterRdx;
+                    aValues[3].Reg64 = ExitInfo.CpuidAccess.DefaultResultRdx;
+                    aenmNames[4] = WHvX64RegisterRbx;
+                    aValues[4].Reg64 = ExitInfo.CpuidAccess.DefaultResultRbx;
+                    hrc = WHvSetVirtualProcessorRegisters(g_hPartition, 0 /*idCpu*/, aenmNames, 5, aValues);
+                    if (SUCCEEDED(hrc))
+                    { /* likely */ }
+                    else
+                        return runtimeError("Error advancing RIP (for %s): %#x\n", pszInstruction, hrc);
+                }
+                else
+                    return runtimeError("VpContext.InstructionLength is zero (for %s)\n", pszInstruction);
+            }
+            else if (ExitInfo.ExitReason == WHvRunVpExitReasonMemoryAccess)
+            {
+                if (ExitInfo.MemoryAccess.Gpa == MY_NOP_MMIO)
+                { /* likely: nop address */ }
+                else
+                    return runtimeError("Unexpected memory access (for %s): %#x\n", pszInstruction, ExitInfo.MemoryAccess.Gpa);
+
+                /* Advance and set return register (assuming RAX and two byte instruction). */
+                aenmNames[0] = WHvX64RegisterRip;
+                if (ExitInfo.VpContext.InstructionLength)
+                    aValues[0].Reg64 = ExitInfo.VpContext.Rip + ExitInfo.VpContext.InstructionLength;
+                else
+                    aValues[0].Reg64 = ExitInfo.VpContext.Rip + 2;
+                aenmNames[1] = WHvX64RegisterRax;
+                aValues[1].Reg64 = 42;
+                hrc = WHvSetVirtualProcessorRegisters(g_hPartition, 0 /*idCpu*/, aenmNames, 2, aValues);
+                if (SUCCEEDED(hrc))
+                { /* likely */ }
+                else
+                    return runtimeError("Error advancing RIP (for %s): %#x\n", pszInstruction, hrc);
+            }
+            else
+                return runtimeError("Unexpected exit (for %s): %#x\n", pszInstruction, ExitInfo.ExitReason);
+        }
+        else
+            return runtimeError("WHvRunVirtualProcessor failed (for %s): %#x\n", pszInstruction, hrc);
+    }
+    uint64_t const nsElapsed = getNanoTS() - nsStart;
+    return reportResult(pszInstruction, cInstructions, nsElapsed, cExits);
+}
+
+
+
+#elif defined(RT_OS_LINUX)
+
+/*
+ * GNU/linux - KVM
+ */
+
+/**
+ * Creates the KVM VM with one VCpu, maps the kvm_run communication structure
+ * and registers g_cbMem bytes of anonymous RAM at MY_MEM_BASE.
+ *
+ * Initializes g_fdVm, g_fdVCpu, g_pVCpuRun, g_cbVCpuRun and g_pbMem.
+ *
+ * @returns 0 on success, 1 on failure (error message already printed).
+ */
+static int createVM(void)
+{
+    int fd = open("/dev/kvm", O_RDWR);
+    if (fd < 0)
+        return error("Error opening /dev/kvm: %d\n", errno);
+
+    g_fdVm = ioctl(fd, KVM_CREATE_VM, (uintptr_t)0);
+    if (g_fdVm < 0)
+        return error("KVM_CREATE_VM failed: %d\n", errno);
+
+    /* Create the VCpu.  The mmap size must be at least a page and page
+       aligned.  (Fix: the original format string was '%#xz' - a broken
+       spelling of the size_t conversion '%#zx'.) */
+    g_cbVCpuRun = ioctl(fd, KVM_GET_VCPU_MMAP_SIZE, (uintptr_t)0);
+    if (g_cbVCpuRun <= 0x1000 || (g_cbVCpuRun & 0xfff))
+        return error("Failed to get KVM_GET_VCPU_MMAP_SIZE: %#zx errno=%d\n", g_cbVCpuRun, errno);
+
+    g_fdVCpu = ioctl(g_fdVm, KVM_CREATE_VCPU, (uintptr_t)0);
+    if (g_fdVCpu < 0)
+        return error("KVM_CREATE_VCPU failed: %d\n", errno);
+
+    /* Fix: the kvm_run structure must be a MAP_SHARED mapping of the VCpu
+       fd per the KVM API; MAP_PRIVATE gives copy-on-write semantics that
+       break userspace stores to the shared structure. */
+    g_pVCpuRun = (struct kvm_run *)mmap(NULL, g_cbVCpuRun, PROT_READ | PROT_WRITE, MAP_SHARED, g_fdVCpu, 0);
+    if ((void *)g_pVCpuRun == MAP_FAILED)
+        return error("mmap kvm_run failed: %d\n", errno);
+
+    /* Memory. */
+    g_pbMem = (unsigned char *)mmap(NULL, g_cbMem, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+    if ((void *)g_pbMem == MAP_FAILED)
+        return error("mmap RAM failed: %d\n", errno);
+
+    struct kvm_userspace_memory_region MemReg;
+    MemReg.slot            = 0;
+    MemReg.flags           = 0;
+    MemReg.guest_phys_addr = MY_MEM_BASE;
+    MemReg.memory_size     = g_cbMem;
+    MemReg.userspace_addr  = (uintptr_t)g_pbMem;
+    int rc = ioctl(g_fdVm, KVM_SET_USER_MEMORY_REGION, &MemReg);
+    if (rc != 0)
+        return error("KVM_SET_USER_MEMORY_REGION failed: %d (%d)\n", errno, rc);
+
+    /* The system fd is no longer needed once the VM and VCpu exist. */
+    close(fd);
+    return 0;
+}
+
+
+/**
+ * Dumps one KVM segment register to stderr (runtimeError helper).
+ *
+ * @param   pszName     The register name to label the line with.
+ * @param   pSReg       The segment register state to dump.
+ */
+static void printSReg(const char *pszName, struct kvm_segment const *pSReg)
+{
+    fprintf(stderr, "     %5s=%04x  base=%016llx  limit=%08x type=%#x p=%d dpl=%d db=%d s=%d l=%d g=%d avl=%d un=%d\n",
+            pszName, pSReg->selector, pSReg->base, pSReg->limit, pSReg->type, pSReg->present, pSReg->dpl,
+            pSReg->db, pSReg->s, pSReg->l, pSReg->g, pSReg->avl, pSReg->unusable);
+}
+
+
+/**
+ * Prints a runtime error message followed by a KVM state dump: the relevant
+ * kvm_run fields, the general purpose and segment registers, and the code
+ * bytes at the guest PC.
+ *
+ * @returns 1, so callers can use "return runtimeError(...)".
+ * @param   pszFormat   The printf-style format string.
+ * @param   ...         Format arguments.
+ */
+static int runtimeError(const char *pszFormat, ...)
+{
+    fprintf(stderr, "runtime error: ");
+    va_list va;
+    va_start(va, pszFormat);
+    vfprintf(stderr, pszFormat, va);
+    va_end(va);
+
+    fprintf(stderr, "                  exit_reason=%#010x\n", g_pVCpuRun->exit_reason);
+    fprintf(stderr, "ready_for_interrupt_injection=%#x\n", g_pVCpuRun->ready_for_interrupt_injection);
+    fprintf(stderr, "                      if_flag=%#x\n", g_pVCpuRun->if_flag);
+    fprintf(stderr, "                        flags=%#x\n", g_pVCpuRun->flags);
+    fprintf(stderr, "               kvm_valid_regs=%#018llx\n", g_pVCpuRun->kvm_valid_regs);
+    fprintf(stderr, "               kvm_dirty_regs=%#018llx\n", g_pVCpuRun->kvm_dirty_regs);
+
+    struct kvm_regs Regs;
+    memset(&Regs, 0, sizeof(Regs));
+    struct kvm_sregs SRegs;
+    memset(&SRegs, 0, sizeof(SRegs));
+    if (   ioctl(g_fdVCpu, KVM_GET_REGS, &Regs) != -1
+        && ioctl(g_fdVCpu, KVM_GET_SREGS, &SRegs) != -1)
+    {
+        fprintf(stderr, "       rip=%016llx\n", Regs.rip);
+        printSReg("cs", &SRegs.cs);
+        fprintf(stderr, "    rflags=%08llx\n", Regs.rflags);
+        fprintf(stderr, "       rax=%016llx\n", Regs.rax);
+        /* Fix: the original printed Regs.rcx under the "rbx" label and
+           Regs.rbx under the "rcx" label. */
+        fprintf(stderr, "       rcx=%016llx\n", Regs.rcx);
+        fprintf(stderr, "       rdx=%016llx\n", Regs.rdx);
+        fprintf(stderr, "       rbx=%016llx\n", Regs.rbx);
+        fprintf(stderr, "       rsp=%016llx\n", Regs.rsp);
+        fprintf(stderr, "       rbp=%016llx\n", Regs.rbp);
+        fprintf(stderr, "       rsi=%016llx\n", Regs.rsi);
+        fprintf(stderr, "       rdi=%016llx\n", Regs.rdi);
+        printSReg("ss", &SRegs.ss);
+        printSReg("ds", &SRegs.ds);
+        printSReg("es", &SRegs.es);
+        printSReg("fs", &SRegs.fs);
+        printSReg("gs", &SRegs.gs);
+        printSReg("tr", &SRegs.tr);
+        printSReg("ldtr", &SRegs.ldt);
+
+        /* Dump the first 8 code bytes at the guest PC if they fall within
+           the mapped RAM chunk. */
+        uint64_t const offMem = Regs.rip + SRegs.cs.base - MY_MEM_BASE;
+        if (offMem < g_cbMem - 10)
+            fprintf(stderr, "  bytes at PC (%#zx): %02x %02x %02x %02x %02x %02x %02x %02x\n", (size_t)(offMem + MY_MEM_BASE),
+                    g_pbMem[offMem    ], g_pbMem[offMem + 1], g_pbMem[offMem + 2], g_pbMem[offMem + 3],
+                    g_pbMem[offMem + 4], g_pbMem[offMem + 5], g_pbMem[offMem + 6], g_pbMem[offMem + 7]);
+    }
+
+    return 1;
+}
+
+/**
+ * Runs the guest code at MY_TEST_RIP in 16-bit real mode under KVM,
+ * servicing NOP I/O port and NOP MMIO exits until the guest accesses
+ * MY_TERM_PORT, then reports the instructions/second rate.
+ *
+ * Note: unlike the Hyper-V version no manual RIP advancing is done in the
+ * exit loop here.
+ *
+ * @returns 0 on success, 1 on failure.
+ * @param   cInstructions   Number of instructions the test code executes
+ *                          (used for the rate calculation only).
+ * @param   pszInstruction  Name of the instruction being benchmarked (for
+ *                          messages).
+ * @param   fTest           MY_TEST_F_XXX; not used by this implementation.
+ * @param   uEax, uEcx, uEdx, uEbx, uEsp, uEbp, uEsi, uEdi
+ *                          Initial general purpose register values.
+ */
+static int runRealModeTest(unsigned cInstructions, const char *pszInstruction, unsigned fTest,
+                           unsigned uEax, unsigned uEcx, unsigned uEdx, unsigned uEbx,
+                           unsigned uEsp, unsigned uEbp, unsigned uEsi, unsigned uEdi)
+{
+    (void)fTest;
+
+    /*
+     * Setup real mode context.
+     */
+#define SET_SEG(a_SReg, a_Base, a_Limit, a_Sel, a_fCode) \
+    do { \
+        a_SReg.base     = (a_Base); \
+        a_SReg.limit    = (a_Limit); \
+        a_SReg.selector = (a_Sel); \
+        a_SReg.type     = (a_fCode) ? 10 : 3; \
+        a_SReg.present  = 1; \
+        a_SReg.dpl      = 0; \
+        a_SReg.db       = 0; \
+        a_SReg.s        = 1; \
+        a_SReg.l        = 0; \
+        a_SReg.g        = 0; \
+        a_SReg.avl      = 0; \
+        a_SReg.unusable = 0; \
+        a_SReg.padding  = 0; \
+    } while (0)
+    struct kvm_regs Regs;
+    memset(&Regs, 0, sizeof(Regs));
+    Regs.rax = uEax;
+    Regs.rcx = uEcx;
+    Regs.rdx = uEdx;
+    Regs.rbx = uEbx;
+    Regs.rsp = uEsp;
+    Regs.rbp = uEbp;
+    Regs.rsi = uEsi;
+    Regs.rdi = uEdi;
+    Regs.rip = MY_TEST_RIP;
+    Regs.rflags = 2;
+    int rc = ioctl(g_fdVCpu, KVM_SET_REGS, &Regs);
+    if (rc != 0)
+        return error("KVM_SET_REGS failed: %d (rc=%d)\n", errno, rc);
+
+    /* Read-modify-write the segment registers so the untouched fields keep
+       their KVM defaults. */
+    struct kvm_sregs SRegs;
+    memset(&SRegs, 0, sizeof(SRegs));
+    rc = ioctl(g_fdVCpu, KVM_GET_SREGS, &SRegs);
+    if (rc != 0)
+        return error("KVM_GET_SREGS failed: %d (rc=%d)\n", errno, rc);
+    SET_SEG(SRegs.es, 0x00000, 0xffff, 0x0000, 0);
+    SET_SEG(SRegs.cs, 0x00000, 0xffff, 0x0000, 1);
+    SET_SEG(SRegs.ss, 0x00000, 0xffff, 0x0000, 0);
+    SET_SEG(SRegs.ds, 0x00000, 0xffff, 0x0000, 0);
+    SET_SEG(SRegs.fs, 0x00000, 0xffff, 0x0000, 0);
+    SET_SEG(SRegs.gs, 0x00000, 0xffff, 0x0000, 0);
+    //SRegs.cr0 = 0x10010 /*WP+ET*/;
+    SRegs.cr2 = 0;
+    //SRegs.cr3 = 0;
+    //SRegs.cr4 = 0;
+    rc = ioctl(g_fdVCpu, KVM_SET_SREGS, &SRegs);
+    if (rc != 0)
+        return error("KVM_SET_SREGS failed: %d (rc=%d)\n", errno, rc);
+
+    /*
+     * Run the test.
+     */
+    uint32_t cExits = 0;
+    uint64_t const nsStart = getNanoTS();
+    for (;;)
+    {
+        rc = ioctl(g_fdVCpu, KVM_RUN, (uintptr_t)0);
+        if (rc == 0)
+        {
+            cExits++;
+            if (g_pVCpuRun->exit_reason == KVM_EXIT_IO)
+            {
+                if (g_pVCpuRun->io.port == MY_NOP_PORT)
+                { /* likely: nop instruction */ }
+                else if (g_pVCpuRun->io.port == MY_TERM_PORT)
+                    break;
+                else
+                    return runtimeError("Unexpected I/O port access (for %s): %#x\n", pszInstruction, g_pVCpuRun->io.port);
+            }
+            else if (g_pVCpuRun->exit_reason == KVM_EXIT_MMIO)
+            {
+                if (g_pVCpuRun->mmio.phys_addr == MY_NOP_MMIO)
+                { /* likely: nop address */ }
+                else
+                    return runtimeError("Unexpected memory access (for %s): %#llx\n", pszInstruction, g_pVCpuRun->mmio.phys_addr);
+            }
+            else
+                return runtimeError("Unexpected exit (for %s): %d\n", pszInstruction, g_pVCpuRun->exit_reason);
+        }
+        else
+            return runtimeError("KVM_RUN failed (for %s): %#x (ret %d)\n", pszInstruction, errno, rc);
+    }
+    uint64_t const nsElapsed = getNanoTS() - nsStart;
+    return reportResult(pszInstruction, cInstructions, nsElapsed, cExits);
+}
+
+
+#elif defined(RT_OS_DARWIN)
+
+/*
+ * Mac OS X - Hypervisor API.
+ */
+
+/**
+ * Creates the Hypervisor.framework VM and VCpu, and maps g_cbMem bytes of
+ * RAM (filled with 0xf4) at MY_MEM_BASE.
+ *
+ * @returns 0 on success, 1 on failure (error message already printed).
+ */
+static int createVM(void)
+{
+    /* VM and VCpu */
+    hv_return_t rcHv = hv_vm_create(HV_VM_DEFAULT);
+    if (rcHv != HV_SUCCESS)
+        return error("hv_vm_create failed: %#x\n", rcHv);
+
+    g_idVCpu = -1;
+    rcHv = hv_vcpu_create(&g_idVCpu, HV_VCPU_DEFAULT);
+    if (rcHv != HV_SUCCESS)
+        return error("hv_vcpu_create failed: %#x\n", rcHv);
+
+    /* Memory. */
+    g_pbMem = (unsigned char *)mmap(NULL, g_cbMem, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANON, -1, 0);
+    if ((void *)g_pbMem == MAP_FAILED)
+        return error("mmap RAM failed: %d\n", errno);
+    memset(g_pbMem, 0xf4, g_cbMem);
+
+    rcHv = hv_vm_map(g_pbMem, MY_MEM_BASE, g_cbMem, HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC);
+    if (rcHv != HV_SUCCESS)
+        return error("hv_vm_map failed: %#x\n", rcHv);
+
+    /* Re-applies protections to the page at guest physical 0x2000, which is
+       MY_TEST_RIP.  NOTE(review): purpose unclear given the identical flags
+       in hv_vm_map above -- TODO confirm. */
+    rcHv = hv_vm_protect(0x2000, 0x1000, HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC);
+    if (rcHv != HV_SUCCESS)
+        return error("hv_vm_protect failed: %#x\n", rcHv);
+    return 0;
+}
+
+
+/**
+ * Prints a runtime error message followed by a dump of guest registers
+ * (hv_vcpu_read_register) and selected VMCS fields (hv_vmx_vcpu_read_vmcs).
+ *
+ * @returns 1, so callers can use "return runtimeError(...)".
+ * @param   pszFormat   The printf-style format string.
+ * @param   ...         Format arguments.
+ */
+static int runtimeError(const char *pszFormat, ...)
+{
+    fprintf(stderr, "runtime error: ");
+    va_list va;
+    va_start(va, pszFormat);
+    vfprintf(stderr, pszFormat, va);
+    va_end(va);
+
+    /* uFmt: print width (16/32/64); fIsReg: read via hv_vcpu_read_register
+       rather than via the VMCS. */
+    static struct { const char *pszName; uint32_t uField; uint32_t uFmt : 31; uint32_t fIsReg : 1; } const s_aFields[] =
+    {
+        { "VMCS_RO_EXIT_REASON", VMCS_RO_EXIT_REASON, 64, 0 },
+        { "VMCS_RO_EXIT_QUALIFIC", VMCS_RO_EXIT_QUALIFIC, 64, 0 },
+        { "VMCS_RO_INSTR_ERROR", VMCS_RO_INSTR_ERROR, 64, 0 },
+        { "VMCS_RO_VMEXIT_IRQ_INFO", VMCS_RO_VMEXIT_IRQ_INFO, 64, 0 },
+        { "VMCS_RO_VMEXIT_IRQ_ERROR", VMCS_RO_VMEXIT_IRQ_ERROR, 64, 0 },
+        { "VMCS_RO_VMEXIT_INSTR_LEN", VMCS_RO_VMEXIT_INSTR_LEN, 64, 0 },
+        { "VMCS_RO_VMX_INSTR_INFO", VMCS_RO_VMX_INSTR_INFO, 64, 0 },
+        { "VMCS_RO_GUEST_LIN_ADDR", VMCS_RO_GUEST_LIN_ADDR, 64, 0 },
+        { "VMCS_GUEST_PHYSICAL_ADDRESS",VMCS_GUEST_PHYSICAL_ADDRESS,64, 0 },
+        { "VMCS_RO_IO_RCX", VMCS_RO_IO_RCX, 64, 0 },
+        { "VMCS_RO_IO_RSI", VMCS_RO_IO_RSI, 64, 0 },
+        { "VMCS_RO_IO_RDI", VMCS_RO_IO_RDI, 64, 0 },
+        { "VMCS_RO_IO_RIP", VMCS_RO_IO_RIP, 64, 0 },
+        { "rip", HV_X86_RIP, 64, 1 },
+        { "rip (vmcs)", VMCS_GUEST_RIP, 64, 0 },
+        { "cs", HV_X86_CS, 16, 1 },
+        { "cs (vmcs)", VMCS_GUEST_CS, 16, 0 },
+        { "cs.base", VMCS_GUEST_CS_BASE, 64, 0 },
+        { "cs.limit", VMCS_GUEST_CS_LIMIT, 32, 0 },
+        { "cs.attr", VMCS_GUEST_CS_AR, 32, 0 },
+        { "rflags", HV_X86_RFLAGS, 32, 1 },
+        { "rax", HV_X86_RAX, 64, 1 },
+        { "rcx", HV_X86_RCX, 64, 1 },
+        { "rdx", HV_X86_RDX, 64, 1 },
+        { "rbx", HV_X86_RBX, 64, 1 },
+        { "rsp", HV_X86_RSP, 64, 1 },
+        { "rsp (vmcs)", VMCS_GUEST_RSP, 64, 0 },
+        { "ss", HV_X86_SS, 16, 1 },
+        { "ss (vmcs)", VMCS_GUEST_SS, 16, 0 },
+        { "ss.base", VMCS_GUEST_SS_BASE, 64, 0 },
+        { "ss.limit", VMCS_GUEST_SS_LIMIT, 32, 0 },
+        { "ss.attr", VMCS_GUEST_SS_AR, 32, 0 },
+        { "rbp", HV_X86_RBP, 64, 1 },
+        { "rsi", HV_X86_RSI, 64, 1 },
+        { "rdi", HV_X86_RDI, 64, 1 },
+        { "ds", HV_X86_DS, 16, 1 },
+        { "ds (vmcs)", VMCS_GUEST_DS, 16, 0 },
+        { "ds.base", VMCS_GUEST_DS_BASE, 64, 0 },
+        { "ds.limit", VMCS_GUEST_DS_LIMIT, 32, 0 },
+        { "ds.attr", VMCS_GUEST_DS_AR, 32, 0 },
+        { "es", HV_X86_ES, 16, 1 },
+        { "es (vmcs)", VMCS_GUEST_ES, 16, 0 },
+        { "es.base", VMCS_GUEST_ES_BASE, 64, 0 },
+        { "es.limit", VMCS_GUEST_ES_LIMIT, 32, 0 },
+        { "es.attr", VMCS_GUEST_ES_AR, 32, 0 },
+        { "fs", HV_X86_FS, 16, 1 },
+        { "fs (vmcs)", VMCS_GUEST_FS, 16, 0 },
+        { "fs.base", VMCS_GUEST_FS_BASE, 64, 0 },
+        { "fs.limit", VMCS_GUEST_FS_LIMIT, 32, 0 },
+        { "fs.attr", VMCS_GUEST_FS_AR, 32, 0 },
+        { "gs", HV_X86_GS, 16, 1 },
+        { "gs (vmcs)", VMCS_GUEST_GS, 16, 0 },
+        { "gs.base", VMCS_GUEST_GS_BASE, 64, 0 },
+        { "gs.limit", VMCS_GUEST_GS_LIMIT, 32, 0 },
+        { "gs.attr", VMCS_GUEST_GS_AR, 32, 0 },
+        { "cr0", HV_X86_CR0, 64, 1 },
+        { "cr0 (vmcs)", VMCS_GUEST_CR0, 64, 0 },
+        { "cr2", HV_X86_CR2, 64, 1 },
+        { "cr3", HV_X86_CR3, 64, 1 },
+        { "cr3 (vmcs)", VMCS_GUEST_CR3, 64, 0 },
+        { "cr4", HV_X86_CR4, 64, 1 },
+        { "cr4 (vmcs)", VMCS_GUEST_CR4, 64, 0 },
+        { "idtr.base", VMCS_GUEST_IDTR_BASE, 64, 0 },
+        { "idtr.limit", VMCS_GUEST_IDTR_LIMIT, 32, 0 },
+        { "gdtr.base", VMCS_GUEST_GDTR_BASE, 64, 0 },
+        { "gdtr.limit", VMCS_GUEST_GDTR_LIMIT, 32, 0 },
+
+        { "VMCS_CTRL_PIN_BASED", VMCS_CTRL_PIN_BASED, 64, 0 },
+        { "VMCS_CTRL_CPU_BASED", VMCS_CTRL_CPU_BASED, 64, 0 },
+        { "VMCS_CTRL_CPU_BASED2", VMCS_CTRL_CPU_BASED2, 64, 0 },
+        { "VMCS_CTRL_VMENTRY_CONTROLS", VMCS_CTRL_VMENTRY_CONTROLS, 64, 0 },
+        { "VMCS_CTRL_VMEXIT_CONTROLS", VMCS_CTRL_VMEXIT_CONTROLS, 64, 0 },
+        { "VMCS_CTRL_EXC_BITMAP", VMCS_CTRL_EXC_BITMAP, 64, 0 },
+        { "VMCS_CTRL_CR0_MASK", VMCS_CTRL_CR0_MASK, 64, 0 },
+        { "VMCS_CTRL_CR0_SHADOW", VMCS_CTRL_CR0_SHADOW, 64, 0 },
+        { "VMCS_CTRL_CR4_MASK", VMCS_CTRL_CR4_MASK, 64, 0 },
+        { "VMCS_CTRL_CR4_SHADOW", VMCS_CTRL_CR4_SHADOW, 64, 0 },
+    };
+    for (unsigned i = 0; i < sizeof(s_aFields) / sizeof(s_aFields[0]); i++)
+    {
+        uint64_t uValue = UINT64_MAX;
+        hv_return_t rcHv;
+        if (s_aFields[i].fIsReg)
+            rcHv = hv_vcpu_read_register(g_idVCpu, (hv_x86_reg_t)s_aFields[i].uField, &uValue);
+        else
+            rcHv = hv_vmx_vcpu_read_vmcs(g_idVCpu, s_aFields[i].uField, &uValue);
+        if (rcHv == HV_SUCCESS)
+        {
+            if (s_aFields[i].uFmt == 16)
+                fprintf(stderr, "%28s=%04llx\n", s_aFields[i].pszName, uValue);
+            else if (s_aFields[i].uFmt == 32)
+                fprintf(stderr, "%28s=%08llx\n", s_aFields[i].pszName, uValue);
+            else /* 64-bit, printed as high'low dwords */
+                fprintf(stderr, "%28s=%08x'%08x\n", s_aFields[i].pszName, (uint32_t)(uValue >> 32), (uint32_t)uValue);
+        }
+        else
+            fprintf(stderr, "%28s=<%s failed %#x>\n", s_aFields[i].pszName,
+                    s_aFields[i].fIsReg ? "hv_vcpu_read_register" : "hv_vmx_vcpu_read_vmcs", rcHv);
+    }
+    return 1;
+}
+
+
+static int runRealModeTest(unsigned cInstructions, const char *pszInstruction, unsigned fTest,
+ unsigned uEax, unsigned uEcx, unsigned uEdx, unsigned uEbx,
+ unsigned uEsp, unsigned uEbp, unsigned uEsi, unsigned uEdi)
+{
+ /*
+ * Setup real mode context.
+ */
+#define WRITE_REG_RET(a_enmReg, a_uValue) \
+ do { \
+ hv_return_t rcHvX = hv_vcpu_write_register(g_idVCpu, a_enmReg, a_uValue); \
+ if (rcHvX == HV_SUCCESS) { /* likely */ } \
+ else return error("hv_vcpu_write_register(%#x, %s, %#llx) -> %#x\n", g_idVCpu, #a_enmReg, (uint64_t)(a_uValue), rcHvX); \
+ } while (0)
+#define READ_REG_RET(a_enmReg, a_puValue) \
+ do { \
+ hv_return_t rcHvX = hv_vcpu_read_register(g_idVCpu, a_enmReg, a_puValue); \
+ if (rcHvX == HV_SUCCESS) { /* likely */ } \
+ else return error("hv_vcpu_read_register(%#x, %s,) -> %#x\n", g_idVCpu, #a_enmReg, rcHvX); \
+ } while (0)
+#define WRITE_VMCS_RET(a_enmField, a_uValue) \
+ do { \
+ hv_return_t rcHvX = hv_vmx_vcpu_write_vmcs(g_idVCpu, a_enmField, a_uValue); \
+ if (rcHvX == HV_SUCCESS) { /* likely */ } \
+ else return error("hv_vmx_vcpu_write_vmcs(%#x, %s, %#llx) -> %#x\n", g_idVCpu, #a_enmField, (uint64_t)(a_uValue), rcHvX); \
+ } while (0)
+#define READ_VMCS_RET(a_enmField, a_puValue) \
+ do { \
+ hv_return_t rcHvX = hv_vmx_vcpu_read_vmcs(g_idVCpu, a_enmField, a_puValue); \
+ if (rcHvX == HV_SUCCESS) { /* likely */ } \
+ else return error("hv_vmx_vcpu_read_vmcs(%#x, %s,) -> %#x\n", g_idVCpu, #a_enmField, rcHvX); \
+ } while (0)
+#define READ_CAP_RET(a_enmCap, a_puValue) \
+ do { \
+ hv_return_t rcHvX = hv_vmx_read_capability(a_enmCap, a_puValue); \
+ if (rcHvX == HV_SUCCESS) { /* likely */ } \
+ else return error("hv_vmx_read_capability(%s) -> %#x\n", #a_enmCap); \
+ } while (0)
+#define CAP_2_CTRL(a_uCap, a_fWanted) ( ((a_fWanted) | (uint32_t)(a_uCap)) & (uint32_t)((a_uCap) >> 32) )
+#if 1
+ uint64_t uCap;
+ READ_CAP_RET(HV_VMX_CAP_PINBASED, &uCap);
+ WRITE_VMCS_RET(VMCS_CTRL_PIN_BASED, CAP_2_CTRL(uCap, PIN_BASED_INTR | PIN_BASED_NMI | PIN_BASED_VIRTUAL_NMI));
+ READ_CAP_RET(HV_VMX_CAP_PROCBASED, &uCap);
+ WRITE_VMCS_RET(VMCS_CTRL_CPU_BASED, CAP_2_CTRL(uCap, CPU_BASED_HLT
+ | CPU_BASED_INVLPG
+ | CPU_BASED_MWAIT
+ | CPU_BASED_RDPMC
+ | CPU_BASED_RDTSC
+ | CPU_BASED_CR3_LOAD
+ | CPU_BASED_CR3_STORE
+ | CPU_BASED_CR8_LOAD
+ | CPU_BASED_CR8_STORE
+ | CPU_BASED_MOV_DR
+ | CPU_BASED_UNCOND_IO
+ | CPU_BASED_MONITOR
+ | CPU_BASED_PAUSE
+ ));
+ READ_CAP_RET(HV_VMX_CAP_PROCBASED2, &uCap);
+ WRITE_VMCS_RET(VMCS_CTRL_CPU_BASED2, CAP_2_CTRL(uCap, 0));
+ READ_CAP_RET(HV_VMX_CAP_ENTRY, &uCap);
+ WRITE_VMCS_RET(VMCS_CTRL_VMENTRY_CONTROLS, CAP_2_CTRL(uCap, 0));
+#endif
+ WRITE_VMCS_RET(VMCS_CTRL_EXC_BITMAP, UINT32_MAX);
+ WRITE_VMCS_RET(VMCS_CTRL_CR0_MASK, 0x60000000);
+ WRITE_VMCS_RET(VMCS_CTRL_CR0_SHADOW, 0x00000000);
+ WRITE_VMCS_RET(VMCS_CTRL_CR4_MASK, 0x00000000);
+ WRITE_VMCS_RET(VMCS_CTRL_CR4_SHADOW, 0x00000000);
+
+ WRITE_REG_RET(HV_X86_RAX, uEax);
+ WRITE_REG_RET(HV_X86_RCX, uEcx);
+ WRITE_REG_RET(HV_X86_RDX, uEdx);
+ WRITE_REG_RET(HV_X86_RBX, uEbx);
+ WRITE_REG_RET(HV_X86_RSP, uEsp);
+ WRITE_REG_RET(HV_X86_RBP, uEbp);
+ WRITE_REG_RET(HV_X86_RSI, uEsi);
+ WRITE_REG_RET(HV_X86_RDI, uEdi);
+ WRITE_REG_RET(HV_X86_RIP, MY_TEST_RIP);
+ WRITE_REG_RET(HV_X86_RFLAGS, 2);
+ WRITE_REG_RET(HV_X86_ES, 0x0000);
+ WRITE_VMCS_RET(VMCS_GUEST_ES_BASE, 0x0000000);
+ WRITE_VMCS_RET(VMCS_GUEST_ES_LIMIT, 0xffff);
+ WRITE_VMCS_RET(VMCS_GUEST_ES_AR, 0x93);
+ WRITE_REG_RET(HV_X86_CS, 0x0000);
+ WRITE_VMCS_RET(VMCS_GUEST_CS_BASE, 0x0000000);
+ WRITE_VMCS_RET(VMCS_GUEST_CS_LIMIT, 0xffff);
+ WRITE_VMCS_RET(VMCS_GUEST_CS_AR, 0x9b);
+ WRITE_REG_RET(HV_X86_SS, 0x0000);
+ WRITE_VMCS_RET(VMCS_GUEST_SS_BASE, 0x0000000);
+ WRITE_VMCS_RET(VMCS_GUEST_SS_LIMIT, 0xffff);
+ WRITE_VMCS_RET(VMCS_GUEST_SS_AR, 0x93);
+ WRITE_REG_RET(HV_X86_DS, 0x0000);
+ WRITE_VMCS_RET(VMCS_GUEST_DS_BASE, 0x0000000);
+ WRITE_VMCS_RET(VMCS_GUEST_DS_LIMIT, 0xffff);
+ WRITE_VMCS_RET(VMCS_GUEST_DS_AR, 0x93);
+ WRITE_REG_RET(HV_X86_FS, 0x0000);
+ WRITE_VMCS_RET(VMCS_GUEST_FS_BASE, 0x0000000);
+ WRITE_VMCS_RET(VMCS_GUEST_FS_LIMIT, 0xffff);
+ WRITE_VMCS_RET(VMCS_GUEST_FS_AR, 0x93);
+ WRITE_REG_RET(HV_X86_GS, 0x0000);
+ WRITE_VMCS_RET(VMCS_GUEST_GS_BASE, 0x0000000);
+ WRITE_VMCS_RET(VMCS_GUEST_GS_LIMIT, 0xffff);
+ WRITE_VMCS_RET(VMCS_GUEST_GS_AR, 0x93);
+ //WRITE_REG_RET(HV_X86_CR0, 0x10030 /*WP+NE+ET*/);
+ WRITE_VMCS_RET(VMCS_GUEST_CR0, 0x10030 /*WP+NE+ET*/);
+ //WRITE_REG_RET(HV_X86_CR2, 0);
+ //WRITE_REG_RET(HV_X86_CR3, 0);
+ WRITE_VMCS_RET(VMCS_GUEST_CR3, 0);
+ //WRITE_REG_RET(HV_X86_CR4, 0x2000);
+ WRITE_VMCS_RET(VMCS_GUEST_CR4, 0x2000);
+ WRITE_VMCS_RET(VMCS_GUEST_LDTR, 0x0000);
+ WRITE_VMCS_RET(VMCS_GUEST_LDTR_BASE, 0x00000000);
+ WRITE_VMCS_RET(VMCS_GUEST_LDTR_LIMIT, 0x0000);
+ WRITE_VMCS_RET(VMCS_GUEST_LDTR_AR, 0x10000);
+ WRITE_VMCS_RET(VMCS_GUEST_TR, 0x0000);
+ WRITE_VMCS_RET(VMCS_GUEST_TR_BASE, 0x00000000);
+ WRITE_VMCS_RET(VMCS_GUEST_TR_LIMIT, 0x0000);
+ WRITE_VMCS_RET(VMCS_GUEST_TR_AR, 0x00083);
+ hv_vcpu_flush(g_idVCpu);
+ hv_vcpu_invalidate_tlb(g_idVCpu);
+
+ /*
+ * Run the test.
+ */
+ uint32_t cExits = 0;
+ uint64_t const nsStart = getNanoTS();
+ for (;;)
+ {
+ hv_return_t rcHv = hv_vcpu_run(g_idVCpu);
+ if (rcHv == HV_SUCCESS)
+ {
+ cExits++;
+ uint64_t uExitReason = UINT64_MAX;
+ READ_VMCS_RET(VMCS_RO_EXIT_REASON, &uExitReason);
+ if (!(uExitReason & UINT64_C(0x80000000)))
+ {
+ if (uExitReason == VMX_REASON_IO)
+ {
+ uint64_t uIoQual = UINT64_MAX;
+ READ_VMCS_RET(VMCS_RO_EXIT_QUALIFIC, &uIoQual);
+ if ((uint16_t)(uIoQual >> 16) == MY_NOP_PORT && (fTest & MY_TEST_F_NOP_IO))
+ { /* likely: nop instruction */ }
+ else if ((uint16_t)(uIoQual >> 16) == MY_TERM_PORT)
+ break;
+ else
+ return runtimeError("Unexpected I/O port access (for %s): %#x\n", pszInstruction, (uint16_t)(uIoQual >> 16));
+
+ /* Advance RIP. */
+ uint64_t cbInstr = UINT64_MAX;
+ READ_VMCS_RET(VMCS_RO_VMEXIT_INSTR_LEN, &cbInstr);
+ if (cbInstr < 1 || cbInstr > 15)
+ return runtimeError("Bad instr len: %#llx\n", cbInstr);
+ uint64_t uRip = UINT64_MAX;
+ READ_REG_RET(HV_X86_RIP, &uRip);
+ WRITE_REG_RET(HV_X86_RIP, uRip + cbInstr);
+ }
+ else if (uExitReason == VMX_REASON_CPUID && (fTest & MY_TEST_F_CPUID))
+ {
+ /* Set registers and advance RIP. */
+ WRITE_REG_RET(HV_X86_RAX, 0x42424242);
+ WRITE_REG_RET(HV_X86_RCX, 0x04242424);
+ WRITE_REG_RET(HV_X86_RDX, 0x00424242);
+ WRITE_REG_RET(HV_X86_RBX, 0x00024242);
+
+ uint64_t cbInstr = UINT64_MAX;
+ READ_VMCS_RET(VMCS_RO_VMEXIT_INSTR_LEN, &cbInstr);
+ if (cbInstr < 1 || cbInstr > 15)
+ return runtimeError("Bad instr len: %#llx\n", cbInstr);
+ uint64_t uRip = UINT64_MAX;
+ READ_REG_RET(HV_X86_RIP, &uRip);
+ WRITE_REG_RET(HV_X86_RIP, uRip + cbInstr);
+ }
+ else if (uExitReason == VMX_REASON_EPT_VIOLATION)
+ {
+ uint64_t uEptQual = UINT64_MAX;
+ READ_VMCS_RET(VMCS_RO_EXIT_QUALIFIC, &uEptQual);
+ uint64_t GCPhys = UINT64_MAX;
+ READ_VMCS_RET(VMCS_GUEST_PHYSICAL_ADDRESS, &GCPhys);
+ if (GCPhys == MY_NOP_MMIO && (fTest & MY_TEST_F_NOP_MMIO))
+ { /* likely */ }
+ else if (GCPhys == MY_TEST_RIP)
+ continue; /* dunno why we get this, but restarting it works */
+ else
+ return runtimeError("Unexpected EPT viotaion at %#llx\n", GCPhys);
+
+ /* Set RAX and advance RIP. */
+ WRITE_REG_RET(HV_X86_RAX, 42);
+
+ uint64_t cbInstr = UINT64_MAX;
+ READ_VMCS_RET(VMCS_RO_VMEXIT_INSTR_LEN, &cbInstr);
+ if (cbInstr < 1 || cbInstr > 15)
+ return runtimeError("Bad instr len: %#llx\n", cbInstr);
+ uint64_t uRip = UINT64_MAX;
+ READ_REG_RET(HV_X86_RIP, &uRip);
+ WRITE_REG_RET(HV_X86_RIP, uRip + cbInstr);
+ }
+ else if (uExitReason == VMX_REASON_IRQ)
+ { /* ignore */ }
+ else
+ return runtimeError("Unexpected exit reason: %#x\n", uExitReason);
+ }
+ else
+ return runtimeError("VM entry failure: %#x\n", uExitReason);
+ }
+ else
+ return runtimeError("hv_vcpu_run failed (for %s): %#x\n", pszInstruction, rcHv);
+ }
+ uint64_t const nsElapsed = getNanoTS() - nsStart;
+ return reportResult(pszInstruction, cInstructions, nsElapsed, cExits);
+}
+
+#else
+# error "port me"
+#endif
+
+/** Dumps the generated guest code bytes as hex on a single "testing:" line. */
+void dumpCode(uint8_t const *pb, uint8_t *pbEnd)
+{
+    printf("testing:");
+    while (pb != pbEnd)
+    {
+        printf(" %02x", *pb);
+        pb++;
+    }
+    printf("\n");
+}
+
+
+/**
+ * Benchmarks I/O port exits: each loop iteration does ten OUTs to MY_NOP_PORT
+ * with ECX counting iterations down, then writes MY_TERM_PORT to finish.
+ *
+ * @returns See runRealModeTest.
+ * @param   cFactor     Iteration count multiplier.
+ */
+int ioportTest(unsigned cFactor)
+{
+    /*
+     * Produce realmode code
+     */
+    unsigned char *pb = &g_pbMem[MY_TEST_RIP - MY_MEM_BASE];
+    unsigned char * const pbStart = pb;
+    /* OUT DX, AL - 10 times */
+    for (unsigned i = 0; i < 10; i++)
+        *pb++ = 0xee;
+    /* DEC ECX (0x66 operand-size prefix + 0x49) */
+    *pb++ = 0x66;
+    *pb++ = 0x48 + 1;
+    /* JNZ MY_TEST_RIP */
+    *pb++ = 0x75;
+    *pb = (signed char)(pbStart - pb - 1);
+    pb++;
+    /* OUT MY_TERM_PORT, AL - Termination port call. */
+    *pb++ = 0xe6;
+    *pb++ = MY_TERM_PORT;
+    /* JMP to previous instruction (spin in case the OUT does not terminate us) */
+    *pb++ = 0xeb;
+    *pb++ = 0xfc;
+    dumpCode(pbStart, pb);
+
+    return runRealModeTest(100000 * cFactor, "OUT", MY_TEST_F_NOP_IO,
+                           42 /*eax*/, 10000 * cFactor /*ecx*/, MY_NOP_PORT /*edx*/, 0 /*ebx*/,
+                           0 /*esp*/, 0 /*ebp*/, 0 /*esi*/, 0 /*uEdi*/);
+}
+
+
+/**
+ * Benchmarks CPUID exits: each loop iteration zeroes EAX and executes CPUID
+ * ten times, with ESI counting iterations down to zero.
+ *
+ * @returns See runRealModeTest.
+ * @param   cFactor     Iteration count multiplier.
+ */
+int cpuidTest(unsigned cFactor)
+{
+    /*
+     * Produce realmode code
+     */
+    unsigned char *pb = &g_pbMem[MY_TEST_RIP - MY_MEM_BASE];
+    unsigned char * const pbStart = pb;
+    for (unsigned i = 0; i < 10; i++)
+    {
+        /* XOR EAX,EAX (0x66 operand-size prefix) */
+        *pb++ = 0x66;
+        *pb++ = 0x33;
+        *pb++ = 0xc0;
+
+        /* CPUID */
+        *pb++ = 0x0f;
+        *pb++ = 0xa2;
+    }
+    /* DEC ESI (0x66 operand-size prefix + 0x4e) */
+    *pb++ = 0x66;
+    *pb++ = 0x48 + 6;
+    /* JNZ MY_TEST_RIP */
+    *pb++ = 0x75;
+    *pb = (signed char)(pbStart - pb - 1);
+    pb++;
+    /* OUT MY_TERM_PORT, AL - Termination port call. */
+    *pb++ = 0xe6;
+    *pb++ = MY_TERM_PORT;
+    /* JMP to previous instruction (spin in case the OUT does not terminate us) */
+    *pb++ = 0xeb;
+    *pb++ = 0xfc;
+    dumpCode(pbStart, pb);
+
+    return runRealModeTest(100000 * cFactor, "CPUID", MY_TEST_F_CPUID,
+                           0 /*eax*/, 0 /*ecx*/, 0 /*edx*/, 0 /*ebx*/,
+                           0 /*esp*/, 0 /*ebp*/, 10000 * cFactor /*esi*/, 0 /*uEdi*/);
+}
+
+
+/**
+ * Benchmarks MMIO read exits: each loop iteration does ten byte reads from
+ * DS:[BX] (BX initialized to MY_NOP_MMIO), with ESI counting iterations down.
+ *
+ * @returns See runRealModeTest.
+ * @param   cFactor     Iteration count multiplier.
+ */
+int mmioTest(unsigned cFactor)
+{
+    /*
+     * Produce realmode code accessing MY_MMIO_NOP address assuming it's low.
+     */
+    unsigned char *pb = &g_pbMem[MY_TEST_RIP - MY_MEM_BASE];
+    unsigned char * const pbStart = pb;
+    for (unsigned i = 0; i < 10; i++)
+    {
+        /* MOV AL,DS:[BX] */
+        *pb++ = 0x8a;
+        *pb++ = 0x07;
+    }
+    /* DEC ESI (0x66 operand-size prefix + 0x4e) */
+    *pb++ = 0x66;
+    *pb++ = 0x48 + 6;
+    /* JNZ MY_TEST_RIP */
+    *pb++ = 0x75;
+    *pb = (signed char)(pbStart - pb - 1);
+    pb++;
+    /* OUT MY_TERM_PORT, AL - Termination port call. */
+    *pb++ = 0xe6;
+    *pb++ = MY_TERM_PORT;
+    /* JMP to previous instruction (spin in case the OUT does not terminate us) */
+    *pb++ = 0xeb;
+    *pb++ = 0xfc;
+    dumpCode(pbStart, pb);
+
+    return runRealModeTest(100000 * cFactor, "MMIO/r1", MY_TEST_F_NOP_MMIO,
+                           0 /*eax*/, 0 /*ecx*/, 0 /*edx*/, MY_NOP_MMIO /*ebx*/,
+                           0 /*esp*/, 0 /*ebp*/, 10000 * cFactor /*esi*/, 0 /*uEdi*/);
+}
+
+
+
+/**
+ * Entry point: parses options, creates the test VM and runs the three
+ * benchmarks (I/O port, CPUID, MMIO).
+ *
+ * @returns 0 on success, 2 on syntax error, otherwise createVM()'s status.
+ */
+int main(int argc, char **argv)
+{
+    /*
+     * Do some parameter parsing.
+     */
+#ifdef RT_OS_WINDOWS
+    unsigned const cFactorDefault = 4;
+#elif defined(RT_OS_DARWIN) /* fix: plain '#elif RT_OS_DARWIN' breaks if the macro is defined without a value */
+    unsigned const cFactorDefault = 32;
+#else
+    unsigned const cFactorDefault = 24;
+#endif
+    unsigned cFactor = cFactorDefault;
+    for (int i = 1; i < argc; i++)
+    {
+        const char *pszArg = argv[i];
+        if (   strcmp(pszArg, "--help") == 0
+            || strcmp(pszArg, "/help") == 0
+            || strcmp(pszArg, "-h") == 0
+            || strcmp(pszArg, "-?") == 0
+            || strcmp(pszArg, "/?") == 0)
+        {
+            printf("Does some benchmarking of the native NEM engine.\n"
+                   "\n"
+                   "Usage: NemRawBench-1 --factor <factor>\n"
+                   "\n"
+                   "Options\n"
+                   "  --factor <factor>\n"
+                   "    Iteration count factor.  Default is %u.\n"
+                   "    Lower it if execution is slow, increase if quick.\n",
+                   cFactorDefault);
+            return 0;
+        }
+        if (strcmp(pszArg, "--factor") == 0)
+        {
+            i++;
+            if (i < argc)
+                cFactor = atoi(argv[i]); /* NOTE(review): no validation; atoi returns 0 on junk input */
+            else
+            {
+                fprintf(stderr, "syntax error: Option %s takes a value!\n", pszArg); /* fix: "is takes" */
+                return 2;
+            }
+        }
+        else
+        {
+            fprintf(stderr, "syntax error: Unknown option: %s\n", pszArg);
+            return 2;
+        }
+    }
+
+    /*
+     * Create the VM
+     */
+    g_cbMem = 128*1024 - MY_MEM_BASE;
+    int rcExit = createVM();
+    if (rcExit == 0)
+    {
+        printf("tstNemBench-1: Successfully created test VM...\n");
+
+        /*
+         * Do the benchmarking.  Failures are reported by the tests themselves
+         * and deliberately do not change the process exit code.
+         */
+        ioportTest(cFactor);
+        cpuidTest(cFactor);
+        mmioTest(cFactor);
+
+        printf("tstNemBench-1: done\n");
+    }
+    return rcExit;
+}
+
+/*
+ * Results:
+ *
+ * - Darwin/xnu 10.12.6/16.7.0; 3.1GHz Intel Core i7-7920HQ (Kaby Lake):
+ * 925 845 OUT instructions per second (3 200 307 exits in 3 456 301 621 ns)
+ * 949 278 CPUID instructions per second (3 200 222 exits in 3 370 980 173 ns)
+ * 871 499 MMIO/r1 instructions per second (3 200 223 exits in 3 671 834 221 ns)
+ *
+ * - Linux 4.15.0 / ubuntu 18.04.1 Desktop LiveCD; 3.1GHz Intel Core i7-7920HQ (Kaby Lake):
+ * 829 775 OUT instructions per second (3 200 001 exits in 3 856 466 567 ns)
+ * 2 212 038 CPUID instructions per second (1 exits in 1 446 629 591 ns) [1]
+ * 477 962 MMIO/r1 instructions per second (3 200 001 exits in 6 695 090 600 ns)
+ *
+ * - Linux 4.15.0 / ubuntu 18.04.1 Desktop LiveCD; 3.4GHz Core i5-3570 (Ivy Bridge):
+ * 717 216 OUT instructions per second (2 400 001 exits in 3 346 271 640 ns)
+ * 1 675 983 CPUID instructions per second (1 exits in 1 431 995 135 ns) [1]
+ * 402 621 MMIO/r1 instructions per second (2 400 001 exits in 5 960 930 854 ns)
+ *
+ * - Linux 4.18.0-1-amd64 (debian); 3.4GHz AMD Threadripper 1950X:
+ * 455 727 OUT instructions per second (2 400 001 exits in 5 266 300 471 ns)
+ * 1 745 014 CPUID instructions per second (1 exits in 1 375 346 658 ns) [1]
+ * 351 767 MMIO/r1 instructions per second (2 400 001 exits in 6 822 684 544 ns)
+ *
+ * - Windows 1803 updated as per 2018-10-01; 3.4GHz Core i5-3570 (Ivy Bridge):
+ * 67 778 OUT instructions per second (400 001 exits in 5 901 560 700 ns)
+ * 66 113 CPUID instructions per second (400 001 exits in 6 050 208 000 ns)
+ * 62 939 MMIO/r1 instructions per second (400 001 exits in 6 355 302 900 ns)
+ *
+ * - Windows 1803 updated as per 2018-09-28; 3.4GHz AMD Threadripper 1950X:
+ * 34 485 OUT instructions per second (400 001 exits in 11 598 918 200 ns)
+ * 34 043 CPUID instructions per second (400 001 exits in 11 749 753 200 ns)
+ * 33 124 MMIO/r1 instructions per second (400 001 exits in 12 075 617 000 ns)
+ *
+ * - Windows build 17763; 3.4GHz AMD Threadripper 1950X:
+ * 65 633 OUT instructions per second (400 001 exits in 6 094 409 100 ns)
+ * 65 245 CPUID instructions per second (400 001 exits in 6 130 720 600 ns)
+ * 61 642 MMIO/r1 instructions per second (400 001 exits in 6 489 013 700 ns)
+ *
+ *
+ * [1] CPUID causes no return to ring-3 with KVM.
+ *
+ *
+ * For reference we can compare with similar tests in bs2-test1 running VirtualBox:
+ *
+ * - Linux 4.18.0-1-amd64 (debian); 3.4GHz AMD Threadripper 1950X; trunk/r125404:
+ * real mode, 32-bit OUT : 1 338 471 ins/sec
+ * real mode, 32-bit OUT-to-ring-3 : 500 337 ins/sec
+ * real mode, CPUID : 1 566 343 ins/sec
+ * real mode, 32-bit write : 870 671 ins/sec
+ * real mode, 32-bit write-to-ring-3: 391 014 ins/sec
+ *
+ * - Darwin/xnu 10.12.6/16.7.0; 3.1GHz Intel Core i7-7920HQ (Kaby Lake); trunk/r125404:
+ * real mode, 32-bit OUT : 790 117 ins/sec
+ * real mode, 32-bit OUT-to-ring-3 : 157 205 ins/sec
+ * real mode, CPUID : 1 001 087 ins/sec
+ * real mode, 32-bit write : 651 257 ins/sec
+ * real mode, 32-bit write-to-ring-3: 157 773 ins/sec
+ *
+ * - Linux 4.15.0 / ubuntu 18.04.1 Desktop LiveCD; 3.1GHz Intel Core i7-7920HQ (Kaby Lake); trunk/r125450:
+ * real mode, 32-bit OUT : 1 229 245 ins/sec
+ * real mode, 32-bit OUT-to-ring-3 : 284 848 ins/sec
+ * real mode, CPUID : 1 429 760 ins/sec
+ * real mode, 32-bit write : 820 679 ins/sec
+ * real mode, 32-bit write-to-ring-3: 245 159 ins/sec
+ *
+ * - Windows 1803 updated as per 2018-10-01; 3.4GHz Core i5-3570 (Ivy Bridge); trunk/r15442:
+ * real mode, 32-bit OUT : 961 939 ins/sec
+ * real mode, 32-bit OUT-to-ring-3 : 189 458 ins/sec
+ * real mode, CPUID : 1 060 582 ins/sec
+ * real mode, 32-bit write : 637 967 ins/sec
+ * real mode, 32-bit write-to-ring-3: 148 573 ins/sec
+ *
+ */
diff --git a/src/VBox/VMM/testcase/dev.tar.gz b/src/VBox/VMM/testcase/dev.tar.gz
new file mode 100644
index 00000000..95d3a358
--- /dev/null
+++ b/src/VBox/VMM/testcase/dev.tar.gz
Binary files differ
diff --git a/src/VBox/VMM/testcase/mkdsk.sh b/src/VBox/VMM/testcase/mkdsk.sh
new file mode 100755
index 00000000..f6115cc0
--- /dev/null
+++ b/src/VBox/VMM/testcase/mkdsk.sh
@@ -0,0 +1,76 @@
+#!/bin/sh
+## @file
+# Obsolete?
+#
+
+#
+# Copyright (C) 2006-2020 Oracle Corporation
+#
+# This file is part of VirtualBox Open Source Edition (OSE), as
+# available from http://www.virtualbox.org. This file is free software;
+# you can redistribute it and/or modify it under the terms of the GNU
+# General Public License (GPL) as published by the Free Software
+# Foundation, in version 2 as it comes in the "COPYING" file of the
+# VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+# hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+#
+
+# fix: POSIX test(1) uses '=' for string comparison; '==' is a bashism that
+# fails under strict /bin/sh implementations such as dash.
+if [ "x$3" = "x" ]; then
+
+    echo "syntax error"
+    echo "syntax: $0 imagename <size-in-KBs> <init prog> [tar files]"
+    echo ""
+    echo "The simplest qemu boot image is achieved by only specifying a statically"
+    echo "linked init program and using the dev.tar.gz file to create devices."
+    echo "Then boot linux in qemu specifying the image as -hda. Use the -kernel"
+    echo "option to specify a bzImage kernel image to use, and specify"
+    echo "-append root=/dev/hda so the kernel will mount /dev/hda and look"
+    echo "for /sbin/init there."
+    echo ""
+    echo "Example:"
+    echo "  sh ./mkdsk.sh foo.img 2048 ~/VBox/Tree/out/linux/debug/bin/tstProg1 dev.tar.gz"
+    echo "  qemu -hda foo.img -m 32 -kernel ~/qemutest/linux-test/bzImage-2.4.21 -append root=/dev/hda"
+    exit 1
+fi
+
+image=$1
+size=$2
+init=$3
+
+sizebytes=`expr $size '*' 1024`
+cyls=`expr 8225280 / $sizebytes`
+echo $cyls
+
+echo "* Creating $image of $size kb...."
+rm -f "$image"
+dd if=/dev/zero of="$image" count=$size bs=1024 || exit 1
+
+echo "* Formatting with ext2..."
+/sbin/mkfs.ext2 "$image" || exit 1
+
+echo "* Mounting temporarily at ./tmpmnt..."
+mkdir -p tmpmnt
+sudo mount "$image" ./tmpmnt -t ext2 -o loop=/dev/loop7 || exit 1
+
+# init
+echo "* Copying $init to sbin/init..."
+# fix: the mounted fs root is owned by root, so mkdir needs sudo too.
+sudo mkdir -p tmpmnt/sbin
+sudo cp "$init" tmpmnt/sbin/init
+sudo chmod 755 tmpmnt/sbin/init
+
+shift
+shift
+shift
+while [ "x$1" != "x" ];
+do
+    echo "* Untarring $1 to disk..."
+    sudo tar -xzv -C tmpmnt -f "$1"
+    shift
+done
+
+echo "* Unmounting tmpmnt..."
+sudo umount tmpmnt
+rmdir tmpmnt
+echo "* Done! (Perhaps even successfully so...)"
+echo "  'root=/dev/hda' remember :-)"
+exit 0
diff --git a/src/VBox/VMM/testcase/tstAnimate.cpp b/src/VBox/VMM/testcase/tstAnimate.cpp
new file mode 100644
index 00000000..32a1ef44
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstAnimate.cpp
@@ -0,0 +1,943 @@
+/* $Id: tstAnimate.cpp $ */
+/** @file
+ * VBox Animation Testcase / Tool.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/cfgm.h>
+#include <VBox/vmm/em.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/ssm.h>
+#include <VBox/vmm/dbgf.h>
+#include <VBox/err.h>
+#include <VBox/vmm/pdmifs.h>
+#include <VBox/param.h>
+#include <VBox/log.h>
+#include <iprt/assert.h>
+#include <iprt/alloc.h>
+#include <iprt/initterm.h>
+#include <iprt/semaphore.h>
+#include <iprt/string.h>
+#include <iprt/stream.h>
+#include <iprt/file.h>
+#include <iprt/thread.h>
+#include <iprt/ctype.h>
+#include <iprt/uuid.h>
+
+#include <signal.h>
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** Set to true by SigInterrupt() when SIGINT is caught. */
+static volatile bool g_fSignaled = false;
+
+
+/**
+ * SIGINT handler: re-arms itself and raises the global stop flag.
+ *
+ * @param   iSignal     The signal number (unused).
+ */
+static void SigInterrupt(int iSignal)
+{
+    NOREF(iSignal);
+    signal(SIGINT, SigInterrupt); /* re-register; SysV-style signal() may reset to SIG_DFL on delivery */
+    g_fSignaled = true;
+    RTPrintf("caught SIGINT\n"); /* NOTE(review): RTPrintf is presumably not async-signal-safe -- acceptable for a testcase, but confirm */
+}
+
+typedef DECLCALLBACK(int) FNSETGUESTGPR(PVM, uint32_t);
+typedef FNSETGUESTGPR *PFNSETGUESTGPR;
+/** Script handler: parses a 32-bit hex value and feeds it to the register setter passed in pvUser. */
+static int scriptGPReg(PVM pVM, char *pszVar, char *pszValue, void *pvUser)
+{
+    NOREF(pszVar);
+    uint32_t uNewValue = 0;
+    int rc = RTStrToUInt32Ex(pszValue, NULL, 16, &uNewValue);
+    if (RT_SUCCESS(rc))
+        rc = ((PFNSETGUESTGPR)(uintptr_t)pvUser)(pVM, uNewValue);
+    return rc;
+}
+
+typedef DECLCALLBACK(int) FNSETGUESTSEL(PVM, uint16_t);
+typedef FNSETGUESTSEL *PFNSETGUESTSEL;
+/** Script handler: parses a 16-bit hex selector and feeds it to the setter passed in pvUser. */
+static int scriptSelReg(PVM pVM, char *pszVar, char *pszValue, void *pvUser)
+{
+    NOREF(pszVar);
+    uint16_t uNewSel = 0;
+    int rc = RTStrToUInt16Ex(pszValue, NULL, 16, &uNewSel);
+    if (RT_SUCCESS(rc))
+        rc = ((PFNSETGUESTSEL)(uintptr_t)pvUser)(pVM, uNewSel);
+    return rc;
+}
+
+typedef DECLCALLBACK(int) FNSETGUESTSYS(PVM, uint32_t);
+typedef FNSETGUESTSYS *PFNSETGUESTSYS;
+/** Script handler: parses a 32-bit hex value and feeds it to the system register setter in pvUser. */
+static int scriptSysReg(PVM pVM, char *pszVar, char *pszValue, void *pvUser)
+{
+    NOREF(pszVar);
+    uint32_t uNewValue = 0;
+    int rc = RTStrToUInt32Ex(pszValue, NULL, 16, &uNewValue);
+    if (RT_SUCCESS(rc))
+        rc = ((PFNSETGUESTSYS)(uintptr_t)pvUser)(pVM, uNewValue);
+    return rc;
+}
+
+
+typedef DECLCALLBACK(int) FNSETGUESTDTR(PVM, uint32_t, uint16_t);
+typedef FNSETGUESTDTR *PFNSETGUESTDTR;
+/**
+ * Script handler for descriptor table registers (GDTR/IDTR).
+ *
+ * Expects pszValue in the form "base:limit" (both hex) and hands the parsed
+ * pair to the setter passed in pvUser.
+ *
+ * @returns -1 if the ':' separator is missing, IPRT status on parse failure,
+ *          otherwise the setter's return code.
+ */
+static int scriptDtrReg(PVM pVM, char *pszVar, char *pszValue, void *pvUser)
+{
+    NOREF(pszVar);
+    char *pszPart2 = strchr(pszValue, ':');
+    if (!pszPart2)
+        return -1;
+    *pszPart2++ = '\0';            /* split in place: pszValue=base, pszPart2=limit */
+    pszPart2 = RTStrStripL(pszPart2);
+    pszValue = RTStrStripR(pszValue);
+
+    /* base (32-bit hex) */
+    uint32_t u32;
+    int rc = RTStrToUInt32Ex(pszValue, NULL, 16, &u32);
+    if (RT_FAILURE(rc))
+        return rc;
+
+    /* limit (16-bit hex) */
+    uint16_t u16;
+    rc = RTStrToUInt16Ex(pszPart2, NULL, 16, &u16);
+    if (RT_FAILURE(rc))
+        return rc;
+
+    return ((PFNSETGUESTDTR)(uintptr_t)pvUser)(pVM, u32, u16);
+}
+
+
+
+
+/* variables - putting in global scope to avoid MSC warning C4640. */
+/** Script variable table: maps a variable name to a parse handler and the
+ *  CPUMSetGuestXxx setter (stored as PFNRT) that the handler casts back and
+ *  invokes with the parsed value. */
+static struct
+{
+    const char *pszVar;                                                           /* variable name as written in the script */
+    int (*pfnHandler)(PVM pVM, char *pszVar, char *pszValue, void *pvUser);       /* parser matching the setter's signature */
+    PFNRT pvUser;                                                                 /* the CPUM setter, type-erased */
+} g_aVars[] =
+{
+    { "eax", scriptGPReg,  (PFNRT)CPUMSetGuestEAX },
+    { "ebx", scriptGPReg,  (PFNRT)CPUMSetGuestEBX },
+    { "ecx", scriptGPReg,  (PFNRT)CPUMSetGuestECX },
+    { "edx", scriptGPReg,  (PFNRT)CPUMSetGuestEDX },
+    { "esp", scriptGPReg,  (PFNRT)CPUMSetGuestESP },
+    { "ebp", scriptGPReg,  (PFNRT)CPUMSetGuestEBP },
+    { "esi", scriptGPReg,  (PFNRT)CPUMSetGuestESI },
+    { "edi", scriptGPReg,  (PFNRT)CPUMSetGuestEDI },
+    { "efl", scriptGPReg,  (PFNRT)CPUMSetGuestEFlags },
+    { "eip", scriptGPReg,  (PFNRT)CPUMSetGuestEIP },
+    { "ss",  scriptSelReg, (PFNRT)CPUMSetGuestSS },
+    { "cs",  scriptSelReg, (PFNRT)CPUMSetGuestCS },
+    { "ds",  scriptSelReg, (PFNRT)CPUMSetGuestDS },
+    { "es",  scriptSelReg, (PFNRT)CPUMSetGuestES },
+    { "fs",  scriptSelReg, (PFNRT)CPUMSetGuestFS },
+    { "gs",  scriptSelReg, (PFNRT)CPUMSetGuestGS },
+    { "cr0", scriptSysReg, (PFNRT)CPUMSetGuestCR0 },
+    { "cr2", scriptSysReg, (PFNRT)CPUMSetGuestCR2 },
+    { "cr3", scriptSysReg, (PFNRT)CPUMSetGuestCR3 },
+    { "cr4", scriptSysReg, (PFNRT)CPUMSetGuestCR4 },
+    { "ldtr",scriptSelReg, (PFNRT)CPUMSetGuestLDTR },
+    { "tr",  scriptSelReg, (PFNRT)CPUMSetGuestTR },
+    { "idtr",scriptDtrReg, (PFNRT)CPUMSetGuestIDTR },
+    { "gdtr",scriptDtrReg, (PFNRT)CPUMSetGuestGDTR }
+};
+
+
+/**
+ * Executes a single script command of the form "var = value".
+ *
+ * Lines without an '=' are silently ignored (VINF_SUCCESS).
+ *
+ * @returns VBox status code, -1 if the variable name is unknown.
+ * @param   pVM     The cross context VM handle.
+ * @param   pszIn   The raw command text.
+ * @param   cch     Length of pszIn (unused).
+ */
+static int scriptCommand(PVM pVM, const char *pszIn, size_t cch)
+{
+    NOREF(cch);
+    int rc = VINF_SUCCESS;
+    char *psz = RTStrDup(pszIn);
+    if (!psz)
+        return VERR_NO_MEMORY; /* fix: RTStrDup failure previously crashed in strchr */
+    char *pszEqual = strchr(psz, '=');
+    if (pszEqual)
+    {
+        /*
+         * var = value
+         */
+        *pszEqual = '\0';
+        RTStrStripR(psz);
+        char *pszValue = RTStrStrip(pszEqual + 1);
+
+        /* Look the variable up in the handler table. */
+        rc = -1;
+        for (unsigned i = 0; i < RT_ELEMENTS(g_aVars); i++)
+        {
+            if (!strcmp(psz, g_aVars[i].pszVar))
+            {
+                rc = g_aVars[i].pfnHandler(pVM, psz, pszValue, (void *)(uintptr_t)g_aVars[i].pvUser);
+                break;
+            }
+        }
+    }
+
+    RTStrFree(psz);
+    return rc;
+}
+
+/**
+ * Reads a register-setup script file and executes it line by line via
+ * scriptCommand().
+ *
+ * Lines starting with ';', '#' or '/' are treated as comments.  Files of
+ * 1MiB or more are rejected.
+ *
+ * @returns VBox status code.
+ * @param   pVM     The cross context VM handle.
+ * @param   File    The open script file.
+ */
+static DECLCALLBACK(int) scriptRun(PVM pVM, RTFILE File)
+{
+    RTPrintf("info: running script...\n");
+    uint64_t cb;
+    int rc = RTFileQuerySize(File, &cb);
+    if (RT_SUCCESS(rc))
+    {
+        if (cb == 0)
+            return VINF_SUCCESS;
+        if (cb < _1M)
+        {
+            char *pszBuf = (char *)RTMemAllocZ(cb + 1);
+            if (pszBuf)
+            {
+                rc = RTFileRead(File, pszBuf, cb, NULL);
+                if (RT_SUCCESS(rc))
+                {
+                    pszBuf[cb] = '\0';
+
+                    /*
+                     * Now process what's in the buffer.
+                     */
+                    char *psz = pszBuf;
+                    while (psz && *psz)
+                    {
+                        /* skip blanks. */
+                        while (RT_C_IS_SPACE(*psz))
+                            psz++;
+                        if (!*psz)
+                            break;
+
+                        /* end of line */
+                        char *pszNext;
+                        char *pszEnd = strchr(psz, '\n');
+                        if (!pszEnd)
+                            pszEnd = strchr(psz, '\r');
+                        if (!pszEnd)
+                            pszNext = pszEnd = strchr(psz, '\0');
+                        else
+                            pszNext = pszEnd + 1;
+
+                        if (*psz != ';' && *psz != '#' && *psz != '/')
+                        {
+                            /* strip end */
+                            *pszEnd = '\0';
+                            while (pszEnd > psz && RT_C_IS_SPACE(pszEnd[-1]))
+                                *--pszEnd = '\0';
+
+                            /* process the line */
+                            RTPrintf("debug: executing script line '%s'\n", psz);
+                            rc = scriptCommand(pVM, psz, pszEnd - psz);
+                            if (RT_FAILURE(rc))
+                            {
+                                RTPrintf("error: '%s' failed: %Rrc\n", psz, rc);
+                                break;
+                            }
+                        }
+                        /* else comment line */
+
+                        /* next */
+                        psz = pszNext;
+                    }
+
+                }
+                else
+                    RTPrintf("error: failed to read script file: %Rrc\n", rc);
+                RTMemFree(pszBuf);
+            }
+            else
+            {
+                RTPrintf("error: Out of memory. (%RU64 bytes)\n", cb + 1); /* fix: %d with a uint64_t arg was undefined behavior */
+                rc = VERR_NO_MEMORY;
+            }
+        }
+        else
+            RTPrintf("error: script file is too large (0x%llx bytes)\n", cb);
+    }
+    else
+        RTPrintf("error: couldn't get size of script file: %Rrc\n", rc);
+
+    return rc;
+}
+
+
+/**
+ * Loads a raw memory dump file into guest physical memory starting at
+ * physical address 0, skipping the VGA/ROM hole (0xa0000..0xfffff).
+ *
+ * @returns VBox status code.
+ * @param   pVM     The cross context VM handle.
+ * @param   File    The open raw memory file.
+ * @param   poff    File offset to start reading at (not updated on return).
+ */
+static DECLCALLBACK(int) loadMem(PVM pVM, RTFILE File, uint64_t *poff)
+{
+    uint64_t off = *poff;
+    RTPrintf("info: loading memory...\n");
+
+    int rc = RTFileSeek(File, off, RTFILE_SEEK_BEGIN, NULL);
+    if (RT_SUCCESS(rc))
+    {
+        RTGCPHYS GCPhys = 0;
+        for (;;)
+        {
+            /* Progress line every PAGE_SIZE*0x1000 bytes. */
+            if (!(GCPhys % (PAGE_SIZE * 0x1000)))
+                RTPrintf("info: %RGp...\n", GCPhys);
+
+            /* read a page from the file */
+            size_t cbRead = 0;
+            uint8_t au8Page[PAGE_SIZE * 16];
+            rc = RTFileRead(File, &au8Page, sizeof(au8Page), &cbRead);
+            /* NOTE(review): a zero-byte read is retried once, presumably to
+               tell a transient short read apart from end-of-file -- confirm. */
+            if (RT_SUCCESS(rc) && !cbRead)
+                rc = RTFileRead(File, &au8Page, sizeof(au8Page), &cbRead);
+            if (RT_SUCCESS(rc) && !cbRead)
+                rc = VERR_EOF;
+            if (RT_FAILURE(rc) || rc == VINF_EOF)
+            {
+                if (rc == VERR_EOF)
+                    rc = VINF_SUCCESS;  /* clean EOF: the whole file was consumed */
+                else
+                    RTPrintf("error: Read error %Rrc while reading the raw memory file.\n", rc);
+                break;
+            }
+
+            /* Write that page to the guest - skip known rom areas for now. */
+            if (GCPhys < 0xa0000 || GCPhys >= 0x100000) /* ASSUME size of a8Page is a power of 2. */
+                PGMPhysWrite(pVM, GCPhys, &au8Page, cbRead, PGMACCESSORIGIN_DEBUGGER);
+            GCPhys += cbRead;
+        }
+    }
+    else
+        RTPrintf("error: Failed to seek to 0x%llx in the raw memory file. rc=%Rrc\n", off, rc);
+
+    return rc;
+}
+
+
+/**
+ * Creates the default configuration.
+ * This assumes an empty tree.
+ *
+ * @returns VBox status code.
+ * @param pVM Pointer to the VM.
+ */
+static DECLCALLBACK(int) cfgmR3CreateDefault(PUVM pUVM, PVM pVM, void *pvUser)
+{
+ RT_NOREF1(pUVM);
+ uint64_t cbMem = *(uint64_t *)pvUser;
+ int rc;
+ int rcAll = VINF_SUCCESS;
+ bool fIOAPIC = false;
+#define UPDATERC() do { if (RT_FAILURE(rc) && RT_SUCCESS(rcAll)) rcAll = rc; } while (0)
+
+ /*
+ * Create VM default values.
+ */
+ PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
+ rc = CFGMR3InsertString(pRoot, "Name", "Default VM");
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pRoot, "RamSize", cbMem);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pRoot, "TimerMillies", 10);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pRoot, "RawR3Enabled", 0);
+ UPDATERC();
+ /** @todo CFGM Defaults: RawR0, PATMEnabled and CASMEnabled needs attention later. */
+ rc = CFGMR3InsertInteger(pRoot, "RawR0Enabled", 0);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pRoot, "PATMEnabled", 0);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pRoot, "CSAMEnabled", 0);
+ UPDATERC();
+
+ /*
+ * PDM.
+ */
+ PCFGMNODE pPdm;
+ rc = CFGMR3InsertNode(pRoot, "PDM", &pPdm);
+ UPDATERC();
+ PCFGMNODE pDevices = NULL;
+ rc = CFGMR3InsertNode(pPdm, "Devices", &pDevices);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pDevices, "LoadBuiltin", 1); /* boolean */
+ UPDATERC();
+ PCFGMNODE pDrivers = NULL;
+ rc = CFGMR3InsertNode(pPdm, "Drivers", &pDrivers);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pDrivers, "LoadBuiltin", 1); /* boolean */
+ UPDATERC();
+
+
+ /*
+ * Devices
+ */
+ pDevices = NULL;
+ rc = CFGMR3InsertNode(pRoot, "Devices", &pDevices);
+ UPDATERC();
+ /* device */
+ PCFGMNODE pDev = NULL;
+ PCFGMNODE pInst = NULL;
+ PCFGMNODE pCfg = NULL;
+#if 0
+ PCFGMNODE pLunL0 = NULL;
+ PCFGMNODE pLunL1 = NULL;
+#endif
+
+ /*
+ * PC Arch.
+ */
+ rc = CFGMR3InsertNode(pDevices, "pcarch", &pDev);
+ UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "Trusted", 1); /* boolean */
+ UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg);
+ UPDATERC();
+
+ /*
+ * PC Bios.
+ */
+ rc = CFGMR3InsertNode(pDevices, "pcbios", &pDev);
+ UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "Trusted", 1); /* boolean */
+ UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg);
+ UPDATERC();
+ rc = CFGMR3InsertString(pCfg, "BootDevice0", "IDE");
+ UPDATERC();
+ rc = CFGMR3InsertString(pCfg, "BootDevice1", "NONE");
+ UPDATERC();
+ rc = CFGMR3InsertString(pCfg, "BootDevice2", "NONE");
+ UPDATERC();
+ rc = CFGMR3InsertString(pCfg, "BootDevice3", "NONE");
+ UPDATERC();
+ rc = CFGMR3InsertString(pCfg, "HardDiskDevice", "piix3ide");
+ UPDATERC();
+ rc = CFGMR3InsertString(pCfg, "FloppyDevice", "i82078");
+ rc = CFGMR3InsertInteger(pCfg, "IOAPIC", fIOAPIC); UPDATERC();
+ RTUUID Uuid;
+ RTUuidClear(&Uuid);
+ rc = CFGMR3InsertBytes(pCfg, "UUID", &Uuid, sizeof(Uuid)); UPDATERC();
+ /* Bios logo. */
+ rc = CFGMR3InsertInteger(pCfg, "FadeIn", 0);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pCfg, "FadeOut", 0);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pCfg, "LogoTime", 0);
+ UPDATERC();
+ rc = CFGMR3InsertString(pCfg, "LogoFile", "");
+ UPDATERC();
+
+ /*
+ * ACPI
+ */
+ rc = CFGMR3InsertNode(pDevices, "acpi", &pDev); UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst); UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "Trusted", 1); /* boolean */ UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg); UPDATERC();
+ rc = CFGMR3InsertInteger(pCfg, "IOAPIC", fIOAPIC); UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "PCIDeviceNo", 7); UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "PCIFunctionNo", 0); UPDATERC();
+
+ /*
+ * DMA
+ */
+ rc = CFGMR3InsertNode(pDevices, "8237A", &pDev); UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst); UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "Trusted", 1); /* boolean */ UPDATERC();
+
+ /*
+ * PCI bus.
+ */
+ rc = CFGMR3InsertNode(pDevices, "pci", &pDev); /* piix3 */
+ UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "Trusted", 1); /* boolean */
+ UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pCfg, "IOAPIC", fIOAPIC); UPDATERC();
+
+ /*
+ * PS/2 keyboard & mouse
+ */
+ rc = CFGMR3InsertNode(pDevices, "pckbd", &pDev);
+ UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "Trusted", 1); /* boolean */ UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg);
+ UPDATERC();
+
+ /*
+ * Floppy
+ */
+ rc = CFGMR3InsertNode(pDevices, "i82078", &pDev); UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst); UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "Trusted", 1); UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg); UPDATERC();
+ rc = CFGMR3InsertInteger(pCfg, "IRQ", 6); UPDATERC();
+ rc = CFGMR3InsertInteger(pCfg, "DMA", 2); UPDATERC();
+ rc = CFGMR3InsertInteger(pCfg, "MemMapped", 0 ); UPDATERC();
+ rc = CFGMR3InsertInteger(pCfg, "IOBase", 0x3f0); UPDATERC();
+
+ /*
+ * i8254 Programmable Interval Timer And Dummy Speaker
+ */
+ rc = CFGMR3InsertNode(pDevices, "i8254", &pDev);
+ UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst);
+ UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg);
+ UPDATERC();
+
+ /*
+ * i8259 Programmable Interrupt Controller.
+ */
+ rc = CFGMR3InsertNode(pDevices, "i8259", &pDev);
+ UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "Trusted", 1); /* boolean */
+ UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg);
+ UPDATERC();
+
+ /*
+ * APIC.
+ */
+ rc = CFGMR3InsertNode(pDevices, "apic", &pDev); UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst); UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "Trusted", 1); /* boolean */ UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg); UPDATERC();
+ rc = CFGMR3InsertInteger(pCfg, "IOAPIC", fIOAPIC); UPDATERC();
+
+ if (fIOAPIC)
+ {
+ /*
+ * I/O Advanced Programmable Interrupt Controller.
+ */
+ rc = CFGMR3InsertNode(pDevices, "ioapic", &pDev); UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst); UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "Trusted", 1); /* boolean */ UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg); UPDATERC();
+ }
+
+
+ /*
+ * RTC MC146818.
+ */
+ rc = CFGMR3InsertNode(pDevices, "mc146818", &pDev); UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst); UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg); UPDATERC();
+
+ /*
+ * VGA.
+ */
+ rc = CFGMR3InsertNode(pDevices, "vga", &pDev); UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst); UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "Trusted", 1); /* boolean */ UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "PCIDeviceNo", 2); UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "PCIFunctionNo", 0); UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg); UPDATERC();
+ rc = CFGMR3InsertInteger(pCfg, "VRamSize", 8 * _1M); UPDATERC();
+ rc = CFGMR3InsertInteger(pCfg, "CustomVideoModes", 0);
+ rc = CFGMR3InsertInteger(pCfg, "HeightReduction", 0); UPDATERC();
+ //rc = CFGMR3InsertInteger(pCfg, "MonitorCount", 1); UPDATERC();
+
+ /*
+ * IDE controller.
+ */
+ rc = CFGMR3InsertNode(pDevices, "piix3ide", &pDev); /* piix3 */
+ UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst);
+ UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "Trusted", 1); /* boolean */
+ UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg); UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "PCIDeviceNo", 1); UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "PCIFunctionNo", 1); UPDATERC();
+
+ /*
+ * Network card.
+ */
+ rc = CFGMR3InsertNode(pDevices, "pcnet", &pDev); UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst); UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "Trusted", 1); /* boolean */ UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "PCIDeviceNo", 3); UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "PCIFunctionNo", 0); UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg); UPDATERC();
+ rc = CFGMR3InsertInteger(pCfg, "Am79C973", 1); UPDATERC();
+ RTMAC Mac;
+ Mac.au16[0] = 0x0080;
+ Mac.au16[2] = Mac.au16[1] = 0x8086;
+ rc = CFGMR3InsertBytes(pCfg, "MAC", &Mac, sizeof(Mac)); UPDATERC();
+
+ /*
+ * VMM Device
+ */
+ rc = CFGMR3InsertNode(pDevices, "VMMDev", &pDev); UPDATERC();
+ rc = CFGMR3InsertNode(pDev, "0", &pInst); UPDATERC();
+ rc = CFGMR3InsertNode(pInst, "Config", &pCfg); UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "Trusted", 1); /* boolean */ UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "PCIDeviceNo", 4); UPDATERC();
+ rc = CFGMR3InsertInteger(pInst, "PCIFunctionNo", 0); UPDATERC();
+
+ /*
+ * ...
+ */
+
+#undef UPDATERC
+ return rcAll;
+}
+
+static void syntax(void)
+{
+ RTPrintf("Syntax: tstAnimate < -r <raw-mem-file> | -z <saved-state> > \n"
+ " [-o <rawmem offset>]\n"
+ " [-s <script file>]\n"
+ " [-m <memory size>]\n"
+ " [-w <warp drive percent>]\n"
+ " [-p]\n"
+ "\n"
+ "The script is on the form:\n"
+ "<reg>=<value>\n");
+}
+
+
+/**
+ * Entry point.
+ */
+extern "C" DECLEXPORT(int) TrustedMain(int argc, char **argv, char **envp)
+{
+ RT_NOREF1(envp);
+ int rcRet = 1;
+ int rc;
+ RTR3InitExe(argc, &argv, RTR3INIT_FLAGS_SUPLIB);
+
+ /*
+ * Parse input.
+ */
+ if (argc <= 1)
+ {
+ syntax();
+ return 1;
+ }
+
+ bool fPowerOn = false;
+ uint32_t u32WarpDrive = 100; /* % */
+ uint64_t cbMem = ~0ULL;
+ const char *pszSavedState = NULL;
+ const char *pszRawMem = NULL;
+ uint64_t offRawMem = 0;
+ const char *pszScript = NULL;
+ for (int i = 1; i < argc; i++)
+ {
+ if (argv[i][0] == '-')
+ {
+ /* check that it's on short form */
+ if (argv[i][2])
+ {
+ if ( strcmp(argv[i], "--help")
+ && strcmp(argv[i], "-help"))
+ RTPrintf("tstAnimate: Syntax error: Unknown argument '%s'.\n", argv[i]);
+ else
+ syntax();
+ return 1;
+ }
+
+ /* check for 2nd argument */
+ switch (argv[i][1])
+ {
+ case 'r':
+ case 'o':
+ case 'c':
+ case 'm':
+ case 'w':
+ case 'z':
+ if (i + 1 < argc)
+ break;
+ RTPrintf("tstAnimate: Syntax error: '%s' takes a 2nd argument.\n", argv[i]);
+ return 1;
+ }
+
+ /* process argument */
+ switch (argv[i][1])
+ {
+ case 'r':
+ pszRawMem = argv[++i];
+ break;
+
+ case 'z':
+ pszSavedState = argv[++i];
+ break;
+
+ case 'o':
+ {
+ rc = RTStrToUInt64Ex(argv[++i], NULL, 0, &offRawMem);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("tstAnimate: Syntax error: Invalid offset given to -o.\n");
+ return 1;
+ }
+ break;
+ }
+
+ case 'm':
+ {
+ char *pszNext;
+ rc = RTStrToUInt64Ex(argv[++i], &pszNext, 0, &cbMem);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("tstAnimate: Syntax error: Invalid memory size given to -m.\n");
+ return 1;
+ }
+ switch (*pszNext)
+ {
+ case 'G': cbMem *= _1G; pszNext++; break;
+ case 'M': cbMem *= _1M; pszNext++; break;
+ case 'K': cbMem *= _1K; pszNext++; break;
+ case '\0': break;
+ default:
+ RTPrintf("tstAnimate: Syntax error: Invalid memory size given to -m.\n");
+ return 1;
+ }
+ if (*pszNext)
+ {
+ RTPrintf("tstAnimate: Syntax error: Invalid memory size given to -m.\n");
+ return 1;
+ }
+ break;
+ }
+
+ case 's':
+ pszScript = argv[++i];
+ break;
+
+ case 'p':
+ fPowerOn = true;
+ break;
+
+ case 'w':
+ {
+ rc = RTStrToUInt32Ex(argv[++i], NULL, 0, &u32WarpDrive);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("tstAnimate: Syntax error: Invalid number given to -w.\n");
+ return 1;
+ }
+ break;
+ }
+
+ case 'h':
+ case 'H':
+ case '?':
+ syntax();
+ return 1;
+
+ default:
+ RTPrintf("tstAnimate: Syntax error: Unknown argument '%s'.\n", argv[i]);
+ return 1;
+ }
+ }
+ else
+ {
+ RTPrintf("tstAnimate: Syntax error at arg no. %d '%s'.\n", i, argv[i]);
+ syntax();
+ return 1;
+ }
+ }
+
+ /*
+ * Check that the basic requirements are met.
+ */
+ if (pszRawMem && pszSavedState)
+ {
+ RTPrintf("tstAnimate: Syntax error: Either -z or -r, not both.\n");
+ return 1;
+ }
+ if (!pszRawMem && !pszSavedState)
+ {
+ RTPrintf("tstAnimate: Syntax error: The -r argument is compulsory.\n");
+ return 1;
+ }
+
+ /*
+ * Open the files.
+ */
+ RTFILE FileRawMem = NIL_RTFILE;
+ if (pszRawMem)
+ {
+ rc = RTFileOpen(&FileRawMem, pszRawMem, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("tstAnimate: error: Failed to open '%s': %Rrc\n", pszRawMem, rc);
+ return 1;
+ }
+ }
+ RTFILE FileScript = NIL_RTFILE;
+ if (pszScript)
+ {
+ rc = RTFileOpen(&FileScript, pszScript, RTFILE_O_READ | RTFILE_O_OPEN | RTFILE_O_DENY_WRITE);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("tstAnimate: error: Failed to open '%s': %Rrc\n", pszScript, rc);
+ return 1;
+ }
+ }
+
+ /*
+ * Figure the memsize if not specified.
+ */
+ if (cbMem == ~0ULL)
+ {
+ if (FileRawMem != NIL_RTFILE)
+ {
+ rc = RTFileQuerySize(FileRawMem, &cbMem);
+ AssertReleaseRC(rc);
+ cbMem -= offRawMem;
+ cbMem &= ~(PAGE_SIZE - 1);
+ }
+ else
+ {
+ RTPrintf("tstAnimate: error: too lazy to figure out the memsize in a saved state.\n");
+ return 1;
+ }
+ }
+ RTPrintf("tstAnimate: info: cbMem=0x%llx bytes\n", cbMem);
+
+ /*
+ * Open a release log.
+ */
+ static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
+ PRTLOGGER pRelLogger;
+ rc = RTLogCreate(&pRelLogger, RTLOGFLAGS_PREFIX_TIME_PROG, "all", "VBOX_RELEASE_LOG",
+ RT_ELEMENTS(s_apszGroups), s_apszGroups, RTLOGDEST_FILE, "./tstAnimate.log");
+ if (RT_SUCCESS(rc))
+ RTLogRelSetDefaultInstance(pRelLogger);
+ else
+ RTPrintf("tstAnimate: rtLogCreateEx failed - %Rrc\n", rc);
+
+ /*
+ * Create empty VM.
+ */
+ PVM pVM;
+ PUVM pUVM;
+ rc = VMR3Create(1, NULL, NULL, NULL, cfgmR3CreateDefault, &cbMem, &pVM, &pUVM);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Load memory.
+ */
+ if (FileRawMem != NIL_RTFILE)
+ rc = VMR3ReqCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)loadMem, 3, pVM, FileRawMem, &offRawMem);
+ else
+ rc = VMR3ReqCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)SSMR3Load,
+ 7, pVM, pszSavedState, (uintptr_t)NULL /*pStreamOps*/, (uintptr_t)NULL /*pvUser*/,
+ SSMAFTER_DEBUG_IT, (uintptr_t)NULL /*pfnProgress*/, (uintptr_t)NULL /*pvProgressUser*/);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Load register script.
+ */
+ if (FileScript != NIL_RTFILE)
+ rc = VMR3ReqCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)scriptRun, 2, pVM, FileScript);
+ if (RT_SUCCESS(rc))
+ {
+ if (fPowerOn)
+ {
+ /*
+ * Adjust warpspeed?
+ */
+ if (u32WarpDrive != 100)
+ {
+ rc = TMR3SetWarpDrive(pUVM, u32WarpDrive);
+ if (RT_FAILURE(rc))
+ RTPrintf("warning: TMVirtualSetWarpDrive(,%u) -> %Rrc\n", u32WarpDrive, rc);
+ }
+
+ /*
+ * Start the thing with single stepping and stuff enabled.
+ * (Try make sure we don't execute anything in raw mode.)
+ */
+ RTPrintf("info: powering on the VM...\n");
+ RTLogGroupSettings(NULL, "+REM_DISAS.e.l.f");
+ rc = VERR_NOT_IMPLEMENTED; /** @todo need some EM single-step indicator (was REMR3DisasEnableStepping) */
+ if (RT_SUCCESS(rc))
+ {
+ rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_RECOMPILE_RING0, true); AssertReleaseRC(rc);
+ rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_RECOMPILE_RING3, true); AssertReleaseRC(rc);
+ DBGFR3Info(pUVM, "cpumguest", "verbose", NULL);
+ if (fPowerOn)
+ rc = VMR3PowerOn(pUVM);
+ if (RT_SUCCESS(rc))
+ {
+ RTPrintf("info: VM is running\n");
+ signal(SIGINT, SigInterrupt);
+ while (!g_fSignaled)
+ RTThreadSleep(1000);
+ }
+ else
+ RTPrintf("error: Failed to power on the VM: %Rrc\n", rc);
+ }
+ else
+ RTPrintf("error: Failed to enabled singlestepping: %Rrc\n", rc);
+ }
+ else
+ {
+ /*
+ * Don't start it, just enter the debugger.
+ */
+ RTPrintf("info: entering debugger...\n");
+ DBGFR3Info(pUVM, "cpumguest", "verbose", NULL);
+ signal(SIGINT, SigInterrupt);
+ while (!g_fSignaled)
+ RTThreadSleep(1000);
+ }
+ RTPrintf("info: shutting down the VM...\n");
+ }
+ /* execScript complains */
+ }
+ else if (FileRawMem == NIL_RTFILE) /* loadMem complains, SSMR3Load doesn't */
+ RTPrintf("tstAnimate: error: SSMR3Load failed: rc=%Rrc\n", rc);
+ rcRet = RT_SUCCESS(rc) ? 0 : 1;
+
+ /*
+ * Cleanup.
+ */
+ rc = VMR3Destroy(pUVM);
+ if (!RT_SUCCESS(rc))
+ {
+ RTPrintf("tstAnimate: error: failed to destroy vm! rc=%Rrc\n", rc);
+ rcRet++;
+ }
+
+ VMR3ReleaseUVM(pUVM);
+ }
+ else
+ {
+ RTPrintf("tstAnimate: fatal error: failed to create vm! rc=%Rrc\n", rc);
+ rcRet++;
+ }
+
+ return rcRet;
+}
+
+
+#if !defined(VBOX_WITH_HARDENING) || !defined(RT_OS_WINDOWS)
+/**
+ * Main entry point.
+ */
+int main(int argc, char **argv, char **envp)
+{
+ return TrustedMain(argc, argv, envp);
+}
+#endif
+
diff --git a/src/VBox/VMM/testcase/tstAsmStructs.cpp b/src/VBox/VMM/testcase/tstAsmStructs.cpp
new file mode 100644
index 00000000..f3d80f65
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstAsmStructs.cpp
@@ -0,0 +1,54 @@
+/* $Id: tstAsmStructs.cpp $ */
+/** @file
+ * Testcase for checking offsets in the assembly structures shared with C/C++.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <VBox/vmm/cpum.h>
+#include "CPUMInternal.h"
+#include "HMInternal.h"
+#include "VMMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/hm_vmx.h>
+
+#include "tstHelp.h"
+#include <stdio.h>
+
+
+/* For sup.mac simplifications. */
+#define SUPDRVTRACERUSRCTX32 SUPDRVTRACERUSRCTX
+#define SUPDRVTRACERUSRCTX64 SUPDRVTRACERUSRCTX
+
+
+int main()
+{
+ int rc = 0;
+ printf("tstAsmStructs: TESTING\n");
+
+#ifdef IN_RING3
+# include "tstAsmStructsHC.h"
+#else
+# include "tstAsmStructsRC.h"
+#endif
+
+ if (rc)
+ printf("tstAsmStructs: FAILURE - %d errors \n", rc);
+ else
+ printf("tstAsmStructs: SUCCESS\n");
+ return rc;
+}
diff --git a/src/VBox/VMM/testcase/tstAsmStructsAsm-lst.sed b/src/VBox/VMM/testcase/tstAsmStructsAsm-lst.sed
new file mode 100644
index 00000000..1d416585
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstAsmStructsAsm-lst.sed
@@ -0,0 +1,105 @@
+# $Id: tstAsmStructsAsm-lst.sed $
+## @file
+# For testing assembly struct when using yasm.
+#
+
+#
+# Copyright (C) 2006-2020 Oracle Corporation
+#
+# This file is part of VirtualBox Open Source Edition (OSE), as
+# available from http://www.virtualbox.org. This file is free software;
+# you can redistribute it and/or modify it under the terms of the GNU
+# General Public License (GPL) as published by the Free Software
+# Foundation, in version 2 as it comes in the "COPYING" file of the
+# VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+# hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+#
+
+#
+# Strip stuff lines and spaces we don't care about.
+#
+/ %line /d
+/\[section /d
+/\[bits /d
+/\[absolute /d
+/ times /d
+s/ *[[:digit:]]* //
+/^ *$/d
+s/ *$//g
+s/^ *//g
+/^\.text$/d
+/^\.data$/d
+/^\.bss$/d
+s/[[:space:]][[:space:]]*/ /g
+
+#
+# Figure which type of line this is and process it accordingly.
+#
+/^[[:alpha:]_][[:alnum:]_]*:/b struct
+/^[[:alpha:]_][[:alnum:]_]*_size EQU \$ - .*$/b struct_equ
+/<gap>/b member
+/^\.[[:alpha:]_][[:alnum:]_.:]* res.*$/b member_two
+/^\.[[:alpha:]_][[:alnum:]_.:]*:$/b member_alias
+b error
+b member_two
+
+
+#
+# Struct start / end.
+#
+:struct_equ
+s/_size EQU.*$/_size/
+:struct
+s/:$//
+h
+s/^/global /
+s/$/ ; struct/
+b end
+
+
+#
+# Struct member
+# Note: the 't' command doesn't seem to be working right with 's'.
+#
+:member
+s/[[:xdigit:]]* *//
+s/<gap> *//
+/^\.[[:alnum:]_.]*[:]* .*$/!t error
+s/\(\.[[:alnum:]_]*\)[:]* .*$/\1 /
+G
+s/^\([^ ]*\) \(.*\)$/global \2\1 ; member/
+s/\n//m
+
+b end
+
+
+#
+# Struct member, no address. yasm r1842 and later.
+#
+:member_two
+s/[:]* *res[bwdtq] .*$//
+s/$/ /
+/^\.[[:alnum:]_.]* *$/!t error
+G
+s/^\([^ ]*\) \(.*\)$/global \2\1 ; member2/
+s/\n//m
+
+b end
+
+#
+# Alias member like Host.cr0Fpu in 64-bit. Drop it.
+#
+:member_alias
+d
+b end
+
+:error
+s/^/\nSed script logic error!\nBuffer: /
+s/$/\nHold: /
+G
+q 1
+b end
+
+
+:end
+
diff --git a/src/VBox/VMM/testcase/tstAsmStructsAsm.asm b/src/VBox/VMM/testcase/tstAsmStructsAsm.asm
new file mode 100644
index 00000000..5465c587
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstAsmStructsAsm.asm
@@ -0,0 +1,39 @@
+; $Id: tstAsmStructsAsm.asm $
+;; @file
+; Assembly / C structure layout testcase.
+;
+; Make yasm/nasm create absolute symbols for the structure definition
+; which we can parse and make code from using objdump and sed.
+;
+
+;
+; Copyright (C) 2006-2020 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+%ifdef RT_ARCH_AMD64
+BITS 64
+%endif
+
+%include "CPUMInternal.mac"
+%include "HMInternal.mac"
+%include "VMMInternal.mac"
+%include "VBox/vmm/cpum.mac"
+%include "VBox/vmm/vm.mac"
+%include "VBox/vmm/hm_vmx.mac"
+%include "VBox/sup.mac"
+%ifdef DO_GLOBALS
+ %include "tstAsmStructsAsm.mac"
+%endif
+
+.text
+.data
+.bss
+
diff --git a/src/VBox/VMM/testcase/tstCFGM.cpp b/src/VBox/VMM/testcase/tstCFGM.cpp
new file mode 100644
index 00000000..85c0c404
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstCFGM.cpp
@@ -0,0 +1,171 @@
+/* $Id: tstCFGM.cpp $ */
+/** @file
+ * Testcase for CFGM.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <VBox/sup.h>
+#include <VBox/vmm/cfgm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+
+#include <VBox/err.h>
+#include <VBox/param.h>
+#include <iprt/initterm.h>
+#include <iprt/stream.h>
+#include <iprt/mem.h>
+#include <iprt/string.h>
+
+#include <iprt/test.h>
+
+
+static void doGeneralTests(PCFGMNODE pRoot)
+{
+ /* test multilevel node creation */
+ PCFGMNODE pChild = NULL;
+ RTTESTI_CHECK_RC_RETV(CFGMR3InsertNode(pRoot, "First/Second/Third//Final", &pChild), VINF_SUCCESS);
+ RTTESTI_CHECK_RETV(RT_VALID_PTR(pChild));
+ RTTESTI_CHECK(CFGMR3GetChild(pRoot, "First/Second/Third/Final") == pChild);
+
+ /*
+ * Boolean queries.
+ */
+ RTTESTI_CHECK_RC(CFGMR3InsertInteger(pChild, "BoolValue", 1), VINF_SUCCESS);
+ bool f = false;
+ RTTESTI_CHECK_RC(CFGMR3QueryBool(pChild, "BoolValue", &f), VINF_SUCCESS);
+ RTTESTI_CHECK(f == true);
+
+ RTTESTI_CHECK_RC(CFGMR3QueryBool(pRoot, "BoolValue", &f), VERR_CFGM_VALUE_NOT_FOUND);
+ RTTESTI_CHECK_RC(CFGMR3QueryBool(NULL, "BoolValue", &f), VERR_CFGM_NO_PARENT);
+
+ RTTESTI_CHECK_RC(CFGMR3QueryBoolDef(pChild, "ValueNotFound", &f, true), VINF_SUCCESS);
+ RTTESTI_CHECK(f == true);
+ RTTESTI_CHECK_RC(CFGMR3QueryBoolDef(pChild, "ValueNotFound", &f, false), VINF_SUCCESS);
+ RTTESTI_CHECK(f == false);
+
+ RTTESTI_CHECK_RC(CFGMR3QueryBoolDef(NULL, "BoolValue", &f, true), VINF_SUCCESS);
+ RTTESTI_CHECK(f == true);
+ RTTESTI_CHECK_RC(CFGMR3QueryBoolDef(NULL, "BoolValue", &f, false), VINF_SUCCESS);
+ RTTESTI_CHECK(f == false);
+
+}
+
+
+
+static void doTestsOnDefaultValues(PCFGMNODE pRoot)
+{
+ /* integer */
+ uint64_t u64;
+ RTTESTI_CHECK_RC(CFGMR3QueryU64(pRoot, "RamSize", &u64), VINF_SUCCESS);
+
+ size_t cb = 0;
+ RTTESTI_CHECK_RC(CFGMR3QuerySize(pRoot, "RamSize", &cb), VINF_SUCCESS);
+ RTTESTI_CHECK(cb == sizeof(uint64_t));
+
+ /* string */
+ char *pszName = NULL;
+ RTTESTI_CHECK_RC(CFGMR3QueryStringAlloc(pRoot, "Name", &pszName), VINF_SUCCESS);
+ RTTESTI_CHECK_RC(CFGMR3QuerySize(pRoot, "Name", &cb), VINF_SUCCESS);
+ RTTESTI_CHECK(cb == strlen(pszName) + 1);
+ MMR3HeapFree(pszName);
+}
+
+
+static void doInVmmTests(RTTEST hTest)
+{
+ /*
+ * Create empty VM structure and init SSM.
+ */
+ int rc = SUPR3Init(NULL);
+ if (RT_FAILURE(rc))
+ {
+ RTTestSkipped(hTest, "SUPR3Init failed with rc=%Rrc", rc);
+ return;
+ }
+
+ PVM pVM;
+ RTTESTI_CHECK_RC_RETV(SUPR3PageAlloc(RT_ALIGN_Z(sizeof(*pVM), PAGE_SIZE) >> PAGE_SHIFT, (void **)&pVM), VINF_SUCCESS);
+
+
+ PUVM pUVM = (PUVM)RTMemPageAlloc(sizeof(*pUVM));
+ pUVM->u32Magic = UVM_MAGIC;
+ pUVM->pVM = pVM;
+ pVM->pUVM = pUVM;
+
+ /*
+ * Do the testing.
+ */
+ RTTESTI_CHECK_RC_RETV(STAMR3InitUVM(pUVM), VINF_SUCCESS);
+ RTTESTI_CHECK_RC_RETV(MMR3InitUVM(pUVM), VINF_SUCCESS);
+ RTTESTI_CHECK_RC_RETV(CFGMR3Init(pVM, NULL, NULL), VINF_SUCCESS);
+ RTTESTI_CHECK_RETV(CFGMR3GetRoot(pVM) != NULL);
+
+ doTestsOnDefaultValues(CFGMR3GetRoot(pVM));
+ doGeneralTests(CFGMR3GetRoot(pVM));
+
+
+ /* done */
+ RTTESTI_CHECK_RC_RETV(CFGMR3Term(pVM), VINF_SUCCESS);
+}
+
+
+static void doStandaloneTests(void)
+{
+ RTTestISub("Standalone");
+ PCFGMNODE pRoot;;
+ RTTESTI_CHECK_RETV((pRoot = CFGMR3CreateTree(NULL)) != NULL);
+ doGeneralTests(pRoot);
+ CFGMR3DestroyTree(pRoot);
+}
+
+
+/**
+ * Entry point.
+ */
+extern "C" DECLEXPORT(int) TrustedMain(int argc, char **argv, char **envp)
+{
+ RT_NOREF3(argc, argv, envp);
+
+ /*
+ * Init runtime.
+ */
+ RTTEST hTest;
+ RTR3InitExeNoArguments(RTR3INIT_FLAGS_SUPLIB);
+ RTEXITCODE rcExit = RTTestInitAndCreate("tstCFGM", &hTest);
+ if (rcExit != RTEXITCODE_SUCCESS)
+ return rcExit;
+
+ doInVmmTests(hTest);
+ doStandaloneTests();
+
+ return RTTestSummaryAndDestroy(hTest);
+}
+
+
+#if !defined(VBOX_WITH_HARDENING) || !defined(RT_OS_WINDOWS)
+/**
+ * Main entry point.
+ */
+int main(int argc, char **argv, char **envp)
+{
+ return TrustedMain(argc, argv, envp);
+}
+#endif
+
diff --git a/src/VBox/VMM/testcase/tstCompressionBenchmark.cpp b/src/VBox/VMM/testcase/tstCompressionBenchmark.cpp
new file mode 100644
index 00000000..3a04992a
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstCompressionBenchmark.cpp
@@ -0,0 +1,642 @@
+/* $Id: tstCompressionBenchmark.cpp $ */
+/** @file
+ * Compression Benchmark for SSM and PGM.
+ */
+
+/*
+ * Copyright (C) 2009-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/buildconfig.h>
+#include <iprt/crc.h>
+#include <iprt/ctype.h>
+#include <iprt/err.h>
+#include <iprt/file.h>
+#include <iprt/getopt.h>
+#include <iprt/initterm.h>
+#include <iprt/md5.h>
+#include <iprt/sha.h>
+#include <iprt/mem.h>
+#include <iprt/param.h>
+#include <iprt/stream.h>
+#include <iprt/string.h>
+#include <iprt/time.h>
+#include <iprt/zip.h>
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+static size_t g_cPages = 20*_1M / PAGE_SIZE;
+static size_t g_cbPages;
+static uint8_t *g_pabSrc;
+
+/** Buffer for the decompressed data (g_cbPages). */
+static uint8_t *g_pabDecompr;
+
+/** Buffer for the compressed data (g_cbComprAlloc). */
+static uint8_t *g_pabCompr;
+/** The current size of the compressed data, ComprOutCallback */
+static size_t g_cbCompr;
+/** The current offset into the compressed data, DecomprInCallback. */
+static size_t g_offComprIn;
+/** The amount of space allocated for compressed data. */
+static size_t g_cbComprAlloc;
+
+
+/**
+ * Store compressed data in the g_pabCompr buffer.
+ */
+static DECLCALLBACK(int) ComprOutCallback(void *pvUser, const void *pvBuf, size_t cbBuf)
+{
+ NOREF(pvUser);
+ AssertReturn(g_cbCompr + cbBuf <= g_cbComprAlloc, VERR_BUFFER_OVERFLOW);
+ memcpy(&g_pabCompr[g_cbCompr], pvBuf, cbBuf);
+ g_cbCompr += cbBuf;
+ return VINF_SUCCESS;
+}
+
+/**
+ * Read compressed data from g_pabComrp.
+ */
+static DECLCALLBACK(int) DecomprInCallback(void *pvUser, void *pvBuf, size_t cbBuf, size_t *pcbBuf)
+{
+ NOREF(pvUser);
+ size_t cb = RT_MIN(cbBuf, g_cbCompr - g_offComprIn);
+ if (pcbBuf)
+ *pcbBuf = cb;
+// AssertReturn(cb > 0, VERR_EOF);
+ memcpy(pvBuf, &g_pabCompr[g_offComprIn], cb);
+ g_offComprIn += cb;
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Benchmark RTCrc routines potentially relevant for SSM or PGM - All in one go.
+ *
+ * @param pabSrc Pointer to the test data.
+ * @param cbSrc The size of the test data.
+ */
+static void tstBenchmarkCRCsAllInOne(uint8_t const *pabSrc, size_t cbSrc)
+{
+ RTPrintf("Algorithm Speed Time Digest\n"
+ "------------------------------------------------------------------------------\n");
+
+ uint64_t NanoTS = RTTimeNanoTS();
+ uint32_t u32Crc = RTCrc32(pabSrc, cbSrc);
+ NanoTS = RTTimeNanoTS() - NanoTS;
+ unsigned uSpeed = (unsigned)(cbSrc / (long double)NanoTS * 1000000000.0 / 1024);
+ RTPrintf("CRC-32 %'9u KB/s %'15llu ns - %08x\n", uSpeed, NanoTS, u32Crc);
+
+
+ NanoTS = RTTimeNanoTS();
+ uint64_t u64Crc = RTCrc64(pabSrc, cbSrc);
+ NanoTS = RTTimeNanoTS() - NanoTS;
+ uSpeed = (unsigned)(cbSrc / (long double)NanoTS * 1000000000.0 / 1024);
+ RTPrintf("CRC-64 %'9u KB/s %'15llu ns - %016llx\n", uSpeed, NanoTS, u64Crc);
+
+ NanoTS = RTTimeNanoTS();
+ u32Crc = RTCrcAdler32(pabSrc, cbSrc);
+ NanoTS = RTTimeNanoTS() - NanoTS;
+ uSpeed = (unsigned)(cbSrc / (long double)NanoTS * 1000000000.0 / 1024);
+ RTPrintf("Adler-32 %'9u KB/s %'15llu ns - %08x\n", uSpeed, NanoTS, u32Crc);
+
+ NanoTS = RTTimeNanoTS();
+ uint8_t abMd5Hash[RTMD5HASHSIZE];
+ RTMd5(pabSrc, cbSrc, abMd5Hash);
+ NanoTS = RTTimeNanoTS() - NanoTS;
+ uSpeed = (unsigned)(cbSrc / (long double)NanoTS * 1000000000.0 / 1024);
+ char szDigest[257];
+ RTMd5ToString(abMd5Hash, szDigest, sizeof(szDigest));
+ RTPrintf("MD5 %'9u KB/s %'15llu ns - %s\n", uSpeed, NanoTS, szDigest);
+
+ NanoTS = RTTimeNanoTS();
+ uint8_t abSha1Hash[RTSHA1_HASH_SIZE];
+ RTSha1(pabSrc, cbSrc, abSha1Hash);
+ NanoTS = RTTimeNanoTS() - NanoTS;
+ uSpeed = (unsigned)(cbSrc / (long double)NanoTS * 1000000000.0 / 1024);
+ RTSha1ToString(abSha1Hash, szDigest, sizeof(szDigest));
+ RTPrintf("SHA-1 %'9u KB/s %'15llu ns - %s\n", uSpeed, NanoTS, szDigest);
+
+ NanoTS = RTTimeNanoTS();
+ uint8_t abSha256Hash[RTSHA256_HASH_SIZE];
+ RTSha256(pabSrc, cbSrc, abSha256Hash);
+ NanoTS = RTTimeNanoTS() - NanoTS;
+ uSpeed = (unsigned)(cbSrc / (long double)NanoTS * 1000000000.0 / 1024);
+ RTSha256ToString(abSha256Hash, szDigest, sizeof(szDigest));
+ RTPrintf("SHA-256 %'9u KB/s %'15llu ns - %s\n", uSpeed, NanoTS, szDigest);
+
+ NanoTS = RTTimeNanoTS();
+ uint8_t abSha512Hash[RTSHA512_HASH_SIZE];
+ RTSha512(pabSrc, cbSrc, abSha512Hash);
+ NanoTS = RTTimeNanoTS() - NanoTS;
+ uSpeed = (unsigned)(cbSrc / (long double)NanoTS * 1000000000.0 / 1024);
+ RTSha512ToString(abSha512Hash, szDigest, sizeof(szDigest));
+ RTPrintf("SHA-512 %'9u KB/s %'15llu ns - %s\n", uSpeed, NanoTS, szDigest);
+}
+
+
+/**
+ * Benchmark RTCrc routines potentially relevant for SSM or PGM - Page by page.
+ *
+ * @param pabSrc Pointer to the test data.
+ * @param cbSrc The size of the test data.
+ */
+static void tstBenchmarkCRCsPageByPage(uint8_t const *pabSrc, size_t cbSrc)
+{
+ RTPrintf("Algorithm Speed Time \n"
+ "----------------------------------------------\n");
+
+ size_t const cPages = cbSrc / PAGE_SIZE;
+
+ uint64_t NanoTS = RTTimeNanoTS();
+ for (uint32_t iPage = 0; iPage < cPages; iPage++)
+ RTCrc32(&pabSrc[iPage * PAGE_SIZE], PAGE_SIZE);
+ NanoTS = RTTimeNanoTS() - NanoTS;
+ unsigned uSpeed = (unsigned)(cbSrc / (long double)NanoTS * 1000000000.0 / 1024);
+ RTPrintf("CRC-32 %'9u KB/s %'15llu ns\n", uSpeed, NanoTS);
+
+
+ NanoTS = RTTimeNanoTS();
+ for (uint32_t iPage = 0; iPage < cPages; iPage++)
+ RTCrc64(&pabSrc[iPage * PAGE_SIZE], PAGE_SIZE);
+ NanoTS = RTTimeNanoTS() - NanoTS;
+ uSpeed = (unsigned)(cbSrc / (long double)NanoTS * 1000000000.0 / 1024);
+ RTPrintf("CRC-64 %'9u KB/s %'15llu ns\n", uSpeed, NanoTS);
+
+ NanoTS = RTTimeNanoTS();
+ for (uint32_t iPage = 0; iPage < cPages; iPage++)
+ RTCrcAdler32(&pabSrc[iPage * PAGE_SIZE], PAGE_SIZE);
+ NanoTS = RTTimeNanoTS() - NanoTS;
+ uSpeed = (unsigned)(cbSrc / (long double)NanoTS * 1000000000.0 / 1024);
+ RTPrintf("Adler-32 %'9u KB/s %'15llu ns\n", uSpeed, NanoTS);
+
+ NanoTS = RTTimeNanoTS();
+ uint8_t abMd5Hash[RTMD5HASHSIZE];
+ for (uint32_t iPage = 0; iPage < cPages; iPage++)
+ RTMd5(&pabSrc[iPage * PAGE_SIZE], PAGE_SIZE, abMd5Hash);
+ NanoTS = RTTimeNanoTS() - NanoTS;
+ uSpeed = (unsigned)(cbSrc / (long double)NanoTS * 1000000000.0 / 1024);
+ RTPrintf("MD5 %'9u KB/s %'15llu ns\n", uSpeed, NanoTS);
+
+ NanoTS = RTTimeNanoTS();
+ uint8_t abSha1Hash[RTSHA1_HASH_SIZE];
+ for (uint32_t iPage = 0; iPage < cPages; iPage++)
+ RTSha1(&pabSrc[iPage * PAGE_SIZE], PAGE_SIZE, abSha1Hash);
+ NanoTS = RTTimeNanoTS() - NanoTS;
+ uSpeed = (unsigned)(cbSrc / (long double)NanoTS * 1000000000.0 / 1024);
+ RTPrintf("SHA-1 %'9u KB/s %'15llu ns\n", uSpeed, NanoTS);
+
+ NanoTS = RTTimeNanoTS();
+ uint8_t abSha256Hash[RTSHA256_HASH_SIZE];
+ for (uint32_t iPage = 0; iPage < cPages; iPage++)
+ RTSha256(&pabSrc[iPage * PAGE_SIZE], PAGE_SIZE, abSha256Hash);
+ NanoTS = RTTimeNanoTS() - NanoTS;
+ uSpeed = (unsigned)(cbSrc / (long double)NanoTS * 1000000000.0 / 1024);
+ RTPrintf("SHA-256 %'9u KB/s %'15llu ns\n", uSpeed, NanoTS);
+
+ NanoTS = RTTimeNanoTS();
+ uint8_t abSha512Hash[RTSHA512_HASH_SIZE];
+ for (uint32_t iPage = 0; iPage < cPages; iPage++)
+ RTSha512(&pabSrc[iPage * PAGE_SIZE], PAGE_SIZE, abSha512Hash);
+ NanoTS = RTTimeNanoTS() - NanoTS;
+ uSpeed = (unsigned)(cbSrc / (long double)NanoTS * 1000000000.0 / 1024);
+ RTPrintf("SHA-512 %'9u KB/s %'15llu ns\n", uSpeed, NanoTS);
+}
+
+
+/** Prints an error message and returns 1 for quick return from main use. */
+static int Error(const char *pszMsgFmt, ...)
+{
+ RTStrmPrintf(g_pStdErr, "\nerror: ");
+ va_list va;
+ va_start(va, pszMsgFmt);
+ RTStrmPrintfV(g_pStdErr, pszMsgFmt, va);
+ va_end(va);
+ return 1;
+}
+
+
+int main(int argc, char **argv)
+{
+ RTR3InitExe(argc, &argv, 0);
+
+ /*
+ * Parse arguments.
+ */
+ static const RTGETOPTDEF s_aOptions[] =
+ {
+ { "--iterations", 'i', RTGETOPT_REQ_UINT32 },
+ { "--num-pages", 'n', RTGETOPT_REQ_UINT32 },
+ { "--page-at-a-time", 'c', RTGETOPT_REQ_UINT32 },
+ { "--page-file", 'f', RTGETOPT_REQ_STRING },
+ { "--offset", 'o', RTGETOPT_REQ_UINT64 },
+ };
+
+ const char *pszPageFile = NULL;
+ uint64_t offPageFile = 0;
+ uint32_t cIterations = 1;
+ uint32_t cPagesAtATime = 1;
+ RTGETOPTUNION Val;
+ RTGETOPTSTATE State;
+ int rc = RTGetOptInit(&State, argc, argv, &s_aOptions[0], RT_ELEMENTS(s_aOptions), 1, 0);
+ AssertRCReturn(rc, 1);
+
+ while ((rc = RTGetOpt(&State, &Val)))
+ {
+ switch (rc)
+ {
+ case 'n':
+ g_cPages = Val.u32;
+ if (g_cPages * PAGE_SIZE * 4 / (PAGE_SIZE * 4) != g_cPages)
+ return Error("The specified page count is too high: %#x (%#llx bytes)\n", g_cPages, (uint64_t)g_cPages * PAGE_SHIFT);
+ if (g_cPages < 1)
+ return Error("The specified page count is too low: %#x\n", g_cPages);
+ break;
+
+ case 'i':
+ cIterations = Val.u32;
+ if (cIterations < 1)
+ return Error("The number of iterations must be 1 or higher\n");
+ break;
+
+ case 'c':
+ cPagesAtATime = Val.u32;
+ if (cPagesAtATime < 1 || cPagesAtATime > 10240)
+ return Error("The specified pages-at-a-time count is out of range: %#x\n", cPagesAtATime);
+ break;
+
+ case 'f':
+ pszPageFile = Val.psz;
+ break;
+
+ case 'o':
+ offPageFile = Val.u64;
+ break;
+
+ case 'O':
+ offPageFile = Val.u64 * PAGE_SIZE;
+ break;
+
+ case 'h':
+ RTPrintf("syntax: tstCompressionBenchmark [options]\n"
+ "\n"
+ "Options:\n"
+ " -h, --help\n"
+ " Show this help page\n"
+ " -i, --iterations <num>\n"
+ " The number of iterations.\n"
+ " -n, --num-pages <pages>\n"
+ " The number of pages.\n"
+ " -c, --pages-at-a-time <pages>\n"
+ " Number of pages at a time.\n"
+ " -f, --page-file <filename>\n"
+ " File or device to read the page from. The default\n"
+ " is to generate some garbage.\n"
+ " -o, --offset <file-offset>\n"
+ " Offset into the page file to start reading at.\n");
+ return 0;
+
+ case 'V':
+ RTPrintf("%sr%s\n", RTBldCfgVersion(), RTBldCfgRevisionStr());
+ return 0;
+
+ default:
+ return RTGetOptPrintError(rc, &Val);
+ }
+ }
+
+ g_cbPages = g_cPages * PAGE_SIZE;
+ uint64_t cbTotal = (uint64_t)g_cPages * PAGE_SIZE * cIterations;
+ uint64_t cbTotalKB = cbTotal / _1K;
+ if (cbTotal / cIterations != g_cbPages)
+ return Error("cPages * cIterations -> overflow\n");
+
+ /*
+ * Gather the test memory.
+ */
+ if (pszPageFile)
+ {
+ size_t cbFile;
+ rc = RTFileReadAllEx(pszPageFile, offPageFile, g_cbPages, RTFILE_RDALL_O_DENY_NONE, (void **)&g_pabSrc, &cbFile);
+ if (RT_FAILURE(rc))
+ return Error("Error reading %zu bytes from %s at %llu: %Rrc\n", g_cbPages, pszPageFile, offPageFile, rc);
+ if (cbFile != g_cbPages)
+ return Error("Error reading %zu bytes from %s at %llu: got %zu bytes\n", g_cbPages, pszPageFile, offPageFile, cbFile);
+ }
+ else
+ {
+ g_pabSrc = (uint8_t *)RTMemAlloc(g_cbPages);
+ if (g_pabSrc)
+ {
+ /* Just fill it with something - warn about the low quality of the something. */
+ RTPrintf("tstCompressionBenchmark: WARNING! No input file was specified so the source\n"
+ "buffer will be filled with generated data of questionable quality.\n");
+#ifdef RT_OS_LINUX
+ RTPrintf("To get real RAM on linux: sudo dd if=/dev/mem ... \n");
+#endif
+ uint8_t *pb = g_pabSrc;
+ uint8_t *pbEnd = &g_pabSrc[g_cbPages];
+ for (; pb != pbEnd; pb += 16)
+ {
+ char szTmp[17];
+ RTStrPrintf(szTmp, sizeof(szTmp), "aaaa%08Xzzzz", (uint32_t)(uintptr_t)pb);
+ memcpy(pb, szTmp, 16);
+ }
+ }
+ }
+
+ g_pabDecompr = (uint8_t *)RTMemAlloc(g_cbPages);
+ g_cbComprAlloc = RT_MAX(g_cbPages * 2, 256 * PAGE_SIZE);
+ g_pabCompr = (uint8_t *)RTMemAlloc(g_cbComprAlloc);
+ if (!g_pabSrc || !g_pabDecompr || !g_pabCompr)
+ return Error("failed to allocate memory buffers (g_cPages=%#x)\n", g_cPages);
+
+ /*
+ * Double loop compressing and uncompressing the data, where the outer does
+ * the specified number of iterations while the inner applies the different
+ * compression algorithms.
+ */
+ struct
+ {
+ /** The time spent decompressing. */
+ uint64_t cNanoDecompr;
+ /** The time spent compressing. */
+ uint64_t cNanoCompr;
+ /** The size of the compressed data. */
+ uint64_t cbCompr;
+ /** First error. */
+ int rc;
+ /** The compression style: block or stream. */
+ bool fBlock;
+ /** Compression type. */
+ RTZIPTYPE enmType;
+ /** Compression level. */
+ RTZIPLEVEL enmLevel;
+ /** Method name. */
+ const char *pszName;
+ } aTests[] =
+ {
+ { 0, 0, 0, VINF_SUCCESS, false, RTZIPTYPE_STORE, RTZIPLEVEL_DEFAULT, "RTZip/Store" },
+ { 0, 0, 0, VINF_SUCCESS, false, RTZIPTYPE_LZF, RTZIPLEVEL_DEFAULT, "RTZip/LZF" },
+/* { 0, 0, 0, VINF_SUCCESS, false, RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT, "RTZip/zlib" }, - slow plus it randomly hits VERR_GENERAL_FAILURE atm. */
+ { 0, 0, 0, VINF_SUCCESS, true, RTZIPTYPE_STORE, RTZIPLEVEL_DEFAULT, "RTZipBlock/Store" },
+ { 0, 0, 0, VINF_SUCCESS, true, RTZIPTYPE_LZF, RTZIPLEVEL_DEFAULT, "RTZipBlock/LZF" },
+ { 0, 0, 0, VINF_SUCCESS, true, RTZIPTYPE_LZJB, RTZIPLEVEL_DEFAULT, "RTZipBlock/LZJB" },
+ { 0, 0, 0, VINF_SUCCESS, true, RTZIPTYPE_LZO, RTZIPLEVEL_DEFAULT, "RTZipBlock/LZO" },
+ };
+ RTPrintf("tstCompressionBenchmark: TESTING..");
+ for (uint32_t i = 0; i < cIterations; i++)
+ {
+ for (uint32_t j = 0; j < RT_ELEMENTS(aTests); j++)
+ {
+ if (RT_FAILURE(aTests[j].rc))
+ continue;
+ memset(g_pabCompr, 0xaa, g_cbComprAlloc);
+ memset(g_pabDecompr, 0xcc, g_cbPages);
+ g_cbCompr = 0;
+ g_offComprIn = 0;
+ RTPrintf("."); RTStrmFlush(g_pStdOut);
+
+ /*
+ * Compress it.
+ */
+ uint64_t NanoTS = RTTimeNanoTS();
+ if (aTests[j].fBlock)
+ {
+ size_t cbLeft = g_cbComprAlloc;
+ uint8_t const *pbSrcPage = g_pabSrc;
+ uint8_t *pbDstPage = g_pabCompr;
+ for (size_t iPage = 0; iPage < g_cPages; iPage += cPagesAtATime)
+ {
+ AssertBreakStmt(cbLeft > PAGE_SIZE * 4, aTests[j].rc = rc = VERR_BUFFER_OVERFLOW);
+ uint32_t *pcb = (uint32_t *)pbDstPage;
+ pbDstPage += sizeof(uint32_t);
+ cbLeft -= sizeof(uint32_t);
+ size_t cbSrc = RT_MIN(g_cPages - iPage, cPagesAtATime) * PAGE_SIZE;
+ size_t cbDst;
+ rc = RTZipBlockCompress(aTests[j].enmType, aTests[j].enmLevel, 0 /*fFlags*/,
+ pbSrcPage, cbSrc,
+ pbDstPage, cbLeft, &cbDst);
+ if (RT_FAILURE(rc))
+ {
+ Error("RTZipBlockCompress failed for '%s' (#%u): %Rrc\n", aTests[j].pszName, j, rc);
+ aTests[j].rc = rc;
+ break;
+ }
+ *pcb = (uint32_t)cbDst;
+ cbLeft -= cbDst;
+ pbDstPage += cbDst;
+ pbSrcPage += cbSrc;
+ }
+ if (RT_FAILURE(rc))
+ continue;
+ g_cbCompr = pbDstPage - g_pabCompr;
+ }
+ else
+ {
+ PRTZIPCOMP pZipComp;
+ rc = RTZipCompCreate(&pZipComp, NULL, ComprOutCallback, aTests[j].enmType, aTests[j].enmLevel);
+ if (RT_FAILURE(rc))
+ {
+ Error("Failed to create the compressor for '%s' (#%u): %Rrc\n", aTests[j].pszName, j, rc);
+ aTests[j].rc = rc;
+ continue;
+ }
+
+ uint8_t const *pbSrcPage = g_pabSrc;
+ for (size_t iPage = 0; iPage < g_cPages; iPage += cPagesAtATime)
+ {
+ size_t cb = RT_MIN(g_cPages - iPage, cPagesAtATime) * PAGE_SIZE;
+ rc = RTZipCompress(pZipComp, pbSrcPage, cb);
+ if (RT_FAILURE(rc))
+ {
+ Error("RTZipCompress failed for '%s' (#%u): %Rrc\n", aTests[j].pszName, j, rc);
+ aTests[j].rc = rc;
+ break;
+ }
+ pbSrcPage += cb;
+ }
+ if (RT_FAILURE(rc))
+ continue;
+ rc = RTZipCompFinish(pZipComp);
+ if (RT_FAILURE(rc))
+ {
+ Error("RTZipCompFinish failed for '%s' (#%u): %Rrc\n", aTests[j].pszName, j, rc);
+ aTests[j].rc = rc;
+ break;
+ }
+ RTZipCompDestroy(pZipComp);
+ }
+ NanoTS = RTTimeNanoTS() - NanoTS;
+ aTests[j].cbCompr += g_cbCompr;
+ aTests[j].cNanoCompr += NanoTS;
+
+ /*
+ * Decompress it.
+ */
+ NanoTS = RTTimeNanoTS();
+ if (aTests[j].fBlock)
+ {
+ uint8_t const *pbSrcPage = g_pabCompr;
+ size_t cbLeft = g_cbCompr;
+ uint8_t *pbDstPage = g_pabDecompr;
+ for (size_t iPage = 0; iPage < g_cPages; iPage += cPagesAtATime)
+ {
+ size_t cbDst = RT_MIN(g_cPages - iPage, cPagesAtATime) * PAGE_SIZE;
+ size_t cbSrc = *(uint32_t *)pbSrcPage;
+ pbSrcPage += sizeof(uint32_t);
+ cbLeft -= sizeof(uint32_t);
+ rc = RTZipBlockDecompress(aTests[j].enmType, 0 /*fFlags*/,
+ pbSrcPage, cbSrc, &cbSrc,
+ pbDstPage, cbDst, &cbDst);
+ if (RT_FAILURE(rc))
+ {
+ Error("RTZipBlockDecompress failed for '%s' (#%u): %Rrc\n", aTests[j].pszName, j, rc);
+ aTests[j].rc = rc;
+ break;
+ }
+ pbDstPage += cbDst;
+ cbLeft -= cbSrc;
+ pbSrcPage += cbSrc;
+ }
+ if (RT_FAILURE(rc))
+ continue;
+ }
+ else
+ {
+ PRTZIPDECOMP pZipDecomp;
+ rc = RTZipDecompCreate(&pZipDecomp, NULL, DecomprInCallback);
+ if (RT_FAILURE(rc))
+ {
+ Error("Failed to create the decompressor for '%s' (#%u): %Rrc\n", aTests[j].pszName, j, rc);
+ aTests[j].rc = rc;
+ continue;
+ }
+
+ uint8_t *pbDstPage = g_pabDecompr;
+ for (size_t iPage = 0; iPage < g_cPages; iPage += cPagesAtATime)
+ {
+ size_t cb = RT_MIN(g_cPages - iPage, cPagesAtATime) * PAGE_SIZE;
+ rc = RTZipDecompress(pZipDecomp, pbDstPage, cb, NULL);
+ if (RT_FAILURE(rc))
+ {
+ Error("RTZipDecompress failed for '%s' (#%u): %Rrc\n", aTests[j].pszName, j, rc);
+ aTests[j].rc = rc;
+ break;
+ }
+ pbDstPage += cb;
+ }
+ RTZipDecompDestroy(pZipDecomp);
+ if (RT_FAILURE(rc))
+ continue;
+ }
+ NanoTS = RTTimeNanoTS() - NanoTS;
+ aTests[j].cNanoDecompr += NanoTS;
+
+ if (memcmp(g_pabDecompr, g_pabSrc, g_cbPages))
+ {
+ Error("The compressed data doesn't match the source for '%s' (%#u)\n", aTests[j].pszName, j);
+ aTests[j].rc = VERR_BAD_EXE_FORMAT;
+ continue;
+ }
+ }
+ }
+ if (RT_SUCCESS(rc))
+ RTPrintf("\n");
+
+ /*
+ * Report the results.
+ */
+ rc = 0;
+ RTPrintf("tstCompressionBenchmark: BEGIN RESULTS\n");
+ RTPrintf("%-20s Compression Decompression\n", "");
+ RTPrintf("%-20s In Out Ratio Size In Out\n", "Method");
+ RTPrintf("%.20s-----------------------------------------------------------------------------------------\n", "---------------------------------------------");
+ for (uint32_t j = 0; j < RT_ELEMENTS(aTests); j++)
+ {
+ if (RT_SUCCESS(aTests[j].rc))
+ {
+ unsigned uComprSpeedIn = (unsigned)(cbTotalKB / (long double)aTests[j].cNanoCompr * 1000000000.0);
+ unsigned uComprSpeedOut = (unsigned)(aTests[j].cbCompr / (long double)aTests[j].cNanoCompr * 1000000000.0 / 1024);
+ unsigned uRatio = (unsigned)(aTests[j].cbCompr / cIterations * 100 / g_cbPages);
+ unsigned uDecomprSpeedIn = (unsigned)(aTests[j].cbCompr / (long double)aTests[j].cNanoDecompr * 1000000000.0 / 1024);
+ unsigned uDecomprSpeedOut = (unsigned)(cbTotalKB / (long double)aTests[j].cNanoDecompr * 1000000000.0);
+ RTPrintf("%-20s %'9u KB/s %'9u KB/s %3u%% %'11llu bytes %'9u KB/s %'9u KB/s",
+ aTests[j].pszName,
+ uComprSpeedIn, uComprSpeedOut, uRatio, aTests[j].cbCompr / cIterations,
+ uDecomprSpeedIn, uDecomprSpeedOut);
+#if 0
+ RTPrintf(" [%'14llu / %'14llu ns]\n",
+ aTests[j].cNanoCompr / cIterations,
+ aTests[j].cNanoDecompr / cIterations);
+#else
+ RTPrintf("\n");
+#endif
+ }
+ else
+ {
+ RTPrintf("%-20s: %Rrc\n", aTests[j].pszName, aTests[j].rc);
+ rc = 1;
+ }
+ }
+ if (pszPageFile)
+ RTPrintf("Input: %'10zu pages from '%s' starting at offset %'lld (%#llx)\n"
+ " %'11zu bytes\n",
+ g_cPages, pszPageFile, offPageFile, offPageFile, g_cbPages);
+ else
+ RTPrintf("Input: %'10zu pages of generated rubbish %'11zu bytes\n",
+ g_cPages, g_cbPages);
+
+ /*
+ * Count zero pages in the data set.
+ */
+ size_t cZeroPages = 0;
+ for (size_t iPage = 0; iPage < g_cPages; iPage++)
+ {
+ if (ASMMemIsZeroPage(&g_pabSrc[iPage * PAGE_SIZE]))
+ cZeroPages++;
+ }
+ RTPrintf(" %'10zu zero pages (%u %%)\n", cZeroPages, cZeroPages * 100 / g_cPages);
+
+ /*
+ * A little extension to the test, benchmark relevant CRCs.
+ */
+ RTPrintf("\n"
+ "tstCompressionBenchmark: Hash/CRC - All In One\n");
+ tstBenchmarkCRCsAllInOne(g_pabSrc, g_cbPages);
+
+ RTPrintf("\n"
+ "tstCompressionBenchmark: Hash/CRC - Page by Page\n");
+ tstBenchmarkCRCsPageByPage(g_pabSrc, g_cbPages);
+
+ RTPrintf("\n"
+ "tstCompressionBenchmark: Hash/CRC - Zero Page Digest\n");
+ static uint8_t s_abZeroPg[PAGE_SIZE];
+ RT_ZERO(s_abZeroPg);
+ tstBenchmarkCRCsAllInOne(s_abZeroPg, PAGE_SIZE);
+
+ RTPrintf("\n"
+ "tstCompressionBenchmark: Hash/CRC - Zero Half Page Digest\n");
+ tstBenchmarkCRCsAllInOne(s_abZeroPg, PAGE_SIZE / 2);
+
+ RTPrintf("tstCompressionBenchmark: END RESULTS\n");
+
+ return rc;
+}
+
diff --git a/src/VBox/VMM/testcase/tstGlobalConfig.cpp b/src/VBox/VMM/testcase/tstGlobalConfig.cpp
new file mode 100644
index 00000000..efddf841
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstGlobalConfig.cpp
@@ -0,0 +1,138 @@
+/* $Id: tstGlobalConfig.cpp $ */
+/** @file
+ * Ring-3 Management program for the GCFGM mock-up.
+ */
+
+/*
+ * Copyright (C) 2007-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <VBox/vmm/vmm.h>
+#include <iprt/errcore.h>
+#include <iprt/assert.h>
+#include <iprt/initterm.h>
+#include <iprt/stream.h>
+#include <iprt/string.h>
+
+
+/**
+ * Prints the tool's usage synopsis.
+ *
+ * @returns 1, the syntax-error exit status, so callers can simply
+ *          write "return Usage();".
+ */
+static int Usage(void)
+{
+    RTPrintf("usage: tstGlobalConfig <value-name> [new value]\n");
+    return 1;
+}
+
+
+/**
+ * Entry point.
+ */
+/**
+ * Entry point.
+ *
+ * Parses the command line into a GCFGM value request, then opens a support
+ * driver session, loads the ring-0 VMM module and issues either a query or
+ * a set-value operation for the named GCFGM value.
+ *
+ * @returns 0 on success, 1 on syntax errors or request failure.
+ * @param   argc    Argument count.
+ * @param   argv    Arguments: argv[1] = value name, argv[2] = optional new value.
+ * @param   envp    Unused environment block.
+ */
+extern "C" DECLEXPORT(int) TrustedMain(int argc, char **argv, char **envp)
+{
+    RT_NOREF1(envp);
+    RTR3InitExe(argc, &argv, 0);
+
+    /*
+     * Validate the argument count up front.
+     */
+    if (argc <= 1)
+        return Usage();
+    if (argc > 3)
+    {
+        RTPrintf("syntax error: too many arguments\n");
+        Usage();
+        return 1;
+    }
+
+    /*
+     * Check the value name (argv[1]); it must be an absolute GCFGM path.
+     */
+    const char  *pszName = argv[1];
+    size_t const cchName = strlen(pszName);
+    if (cchName < 2 || pszName[0] != '/')
+    {
+        RTPrintf("syntax error: malformed name '%s'\n", pszName);
+        return 1;
+    }
+
+    /*
+     * Build the request, copying in the validated name.
+     */
+    GCFGMVALUEREQ Req;
+    memset(&Req, 0, sizeof(Req));
+    Req.Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
+    Req.Hdr.cbReq    = sizeof(Req);
+    if (cchName >= sizeof(Req.szName))
+    {
+        RTPrintf("syntax error: the name '%s' is too long. (max %zu chars)\n", pszName, sizeof(Req.szName) - 1);
+        return 1;
+    }
+    memcpy(&Req.szName[0], pszName, cchName + 1);
+
+    /* A second value argument (argv[2]) switches us from query to set mode. */
+    VMMR0OPERATION enmOp = VMMR0_DO_GCFGM_QUERY_VALUE;
+    if (argc == 3)
+    {
+        char *pszSuffix = NULL;
+        int rcConv = RTStrToUInt64Ex(argv[2], &pszSuffix, 0, &Req.u64Value);
+        if (RT_FAILURE(rcConv) || *pszSuffix)
+        {
+            RTPrintf("syntax error: '%s' didn't convert successfully to a number. (%Rrc,'%s')\n", argv[2], rcConv, pszSuffix);
+            return 1;
+        }
+        enmOp = VMMR0_DO_GCFGM_SET_VALUE;
+    }
+
+    /*
+     * Open the session, load ring-0 and issue the request.
+     */
+    PSUPDRVSESSION pSession;
+    int rc = SUPR3Init(&pSession);
+    if (RT_FAILURE(rc))
+    {
+        RTPrintf("tstGlobalConfig: SUPR3Init -> %Rrc\n", rc);
+        return 1;
+    }
+
+    rc = SUPR3LoadVMM("./VMMR0.r0", NULL /*pErrInfo*/);
+    if (RT_SUCCESS(rc))
+    {
+        Req.pSession = pSession;
+        rc = SUPR3CallVMMR0Ex(NIL_RTR0PTR, NIL_VMCPUID, enmOp, 0, &Req.Hdr);
+        if (RT_SUCCESS(rc))
+            RTPrintf(enmOp == VMMR0_DO_GCFGM_QUERY_VALUE
+                     ? "%s = %RU64 (%#RX64)\n"
+                     : "Successfully set %s = %RU64 (%#RX64)\n",
+                     Req.szName, Req.u64Value, Req.u64Value);
+        else if (enmOp == VMMR0_DO_GCFGM_QUERY_VALUE)
+            RTPrintf("error: Failed to query '%s', rc=%Rrc\n", Req.szName, rc);
+        else
+            RTPrintf("error: Failed to set '%s' to %RU64, rc=%Rrc\n", Req.szName, Req.u64Value, rc);
+
+    }
+    SUPR3Term(false /*fForced*/);
+
+    return RT_FAILURE(rc) ? 1 : 0;
+}
+
+
+#if !defined(VBOX_WITH_HARDENING) || !defined(RT_OS_WINDOWS)
+/**
+ * Main entry point.
+ *
+ * NOTE(review): omitted for hardened Windows builds — presumably the
+ * hardening stub provides its own entry point that calls TrustedMain();
+ * confirm against the hardening sources.
+ */
+int main(int argc, char **argv, char **envp)
+{
+    return TrustedMain(argc, argv, envp);
+}
+#endif
+
diff --git a/src/VBox/VMM/testcase/tstHelp.h b/src/VBox/VMM/testcase/tstHelp.h
new file mode 100644
index 00000000..618c5a05
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstHelp.h
@@ -0,0 +1,169 @@
+/* $Id: tstHelp.h $ */
+/** @file
+ * VMM testcase - Helper stuff.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#ifndef VMM_INCLUDED_SRC_testcase_tstHelp_h
+#define VMM_INCLUDED_SRC_testcase_tstHelp_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+#include <VBox/cdefs.h>
+#include <VBox/vmm/cpum.h>
+
+RT_C_DECLS_BEGIN
+/** Dumps a CPUM guest context with a leading comment string (implemented in a testcase source file, not this header). */
+void tstDumpCtx(PCPUMCTX pCtx, const char *pszComment);
+RT_C_DECLS_END
+
+
+/**
+ * Checks the offset of a data member.
+ *
+ * On mismatch prints an error and increments a local @c rc error counter,
+ * which must be in scope where the macro is used.
+ *
+ * @param type  Type.
+ * @param off   Correct offset.
+ * @param m     Member name.
+ */
+#define CHECK_OFF(type, off, m) \
+    do { \
+        if (off != RT_OFFSETOF(type, m)) \
+        { \
+            printf("error! %#010x %s Off by %d!! (expected off=%#x)\n", \
+                   RT_OFFSETOF(type, m), #type "." #m, off - RT_OFFSETOF(type, m), (int)off); \
+            rc++; \
+        } \
+        /*else */ \
+            /*printf("%#08x %s\n", RT_OFFSETOF(type, m), #m);*/ \
+    } while (0)
+
+/**
+ * Checks the size of type.
+ *
+ * Prints an informational line on success; on mismatch prints an error and
+ * increments a local @c rc error counter, which must be in scope.
+ *
+ * @param type  Type.
+ * @param size  Correct size.
+ */
+#define CHECK_SIZE(type, size) \
+    do { \
+        if (size != sizeof(type)) \
+        { \
+            printf("error! sizeof(%s): %#x (%d) Off by %d!! (expected %#x)\n", \
+                   #type, (int)sizeof(type), (int)sizeof(type), (int)sizeof(type) - (int)size, (int)size); \
+            rc++; \
+        } \
+        else \
+            printf("info: sizeof(%s): %#x (%d)\n", #type, (int)sizeof(type), (int)sizeof(type)); \
+    } while (0)
+
+/**
+ * Checks the alignment of a struct member.
+ *
+ * On misalignment prints an error and increments a local @c rc error
+ * counter, which must be in scope.
+ *
+ * @param strct     The structure.
+ * @param member    The member to check.
+ * @param align     The required alignment (must be a power of two for the
+ *                  mask test below to be meaningful).
+ */
+#define CHECK_MEMBER_ALIGNMENT(strct, member, align) \
+    do \
+    { \
+        if (RT_UOFFSETOF(strct, member) & ((align) - 1) ) \
+        { \
+            printf("error! %s::%s offset=%#x (%u) expected alignment %#x, meaning %#x (%u) off\n", \
+                   #strct, #member, \
+                   (unsigned)RT_OFFSETOF(strct, member), \
+                   (unsigned)RT_OFFSETOF(strct, member), \
+                   (unsigned)(align), \
+                   (unsigned)(((align) - RT_OFFSETOF(strct, member)) & ((align) - 1)), \
+                   (unsigned)(((align) - RT_OFFSETOF(strct, member)) & ((align) - 1)) ); \
+            rc++; \
+        } \
+    } while (0)
+
+/**
+ * Checks that the size of a type is aligned correctly.
+ *
+ * On mismatch prints an error and increments a local @c rc error counter,
+ * which must be in scope.
+ *
+ * @param type   The type to check.
+ * @param align  The required size alignment.
+ */
+#define CHECK_SIZE_ALIGNMENT(type, align) \
+    do { \
+        if (RT_ALIGN_Z(sizeof(type), (align)) != sizeof(type)) \
+        { \
+            printf("error! %s size=%#x (%u), align=%#x %#x (%u) bytes off\n", \
+                   #type, \
+                   (unsigned)sizeof(type), \
+                   (unsigned)sizeof(type), \
+                   (align), \
+                   (unsigned)RT_ALIGN_Z(sizeof(type), align) - (unsigned)sizeof(type), \
+                   (unsigned)RT_ALIGN_Z(sizeof(type), align) - (unsigned)sizeof(type)); \
+            rc++; \
+        } \
+    } while (0)
+
+/**
+ * Checks that an internal struct padding is big enough.
+ *
+ * Expects @a member to be a union-like member with an @c s struct and a
+ * @c padding array; checks the padding covers the struct and is itself
+ * aligned.  Increments a local @c rc error counter on failure.
+ *
+ * @param strct   The outer structure.
+ * @param member  The padded member.
+ * @param align   The required alignment of the padding size.
+ */
+#define CHECK_PADDING(strct, member, align) \
+    do \
+    { \
+        strct *p = NULL; NOREF(p); \
+        if (sizeof(p->member.s) > sizeof(p->member.padding)) \
+        { \
+            printf("error! padding of %s::%s is too small, padding=%d struct=%d correct=%d\n", #strct, #member, \
+                   (int)sizeof(p->member.padding), (int)sizeof(p->member.s), (int)RT_ALIGN_Z(sizeof(p->member.s), (align))); \
+            rc++; \
+        } \
+        else if (RT_ALIGN_Z(sizeof(p->member.padding), (align)) != sizeof(p->member.padding)) \
+        { \
+            printf("error! padding of %s::%s is misaligned, padding=%d correct=%d\n", #strct, #member, \
+                   (int)sizeof(p->member.padding), (int)RT_ALIGN_Z(sizeof(p->member.s), (align))); \
+            rc++; \
+        } \
+    } while (0)
+
+/**
+ * Checks that an internal struct padding is big enough.
+ *
+ * Variant of CHECK_PADDING for a top-level @c s / @c padding pair (64-byte
+ * alignment assumed in the message).  Increments a local @c rc error
+ * counter on failure.
+ *
+ * @param strct  The structure with @c s and @c padding members.
+ */
+#define CHECK_PADDING2(strct) \
+    do \
+    { \
+        strct *p = NULL; NOREF(p); \
+        if (sizeof(p->s) > sizeof(p->padding)) \
+        { \
+            printf("error! padding of %s is too small, padding=%d struct=%d correct=%d\n", #strct, \
+                   (int)sizeof(p->padding), (int)sizeof(p->s), (int)RT_ALIGN_Z(sizeof(p->s), 64)); \
+            rc++; \
+        } \
+    } while (0)
+
+/**
+ * Checks that an internal struct padding is big enough.
+ *
+ * Variant comparing an arbitrary member against a sibling padding member.
+ * Increments a local @c rc error counter on failure.
+ *
+ * @param strct       The structure.
+ * @param member      The member that must fit.
+ * @param pad_member  The padding member it must fit into.
+ */
+#define CHECK_PADDING3(strct, member, pad_member) \
+    do \
+    { \
+        strct *p = NULL; NOREF(p); \
+        if (sizeof(p->member) > sizeof(p->pad_member)) \
+        { \
+            printf("error! padding of %s::%s is too small, padding=%d struct=%d\n", #strct, #member, \
+                   (int)sizeof(p->pad_member), (int)sizeof(p->member)); \
+            rc++; \
+        } \
+    } while (0)
+
+/**
+ * Checks that an expression is true.
+ *
+ * Prints the failed expression with its line number and increments a local
+ * @c rc error counter, which must be in scope.
+ *
+ * @param expr  The expression to evaluate.
+ */
+#define CHECK_EXPR(expr) \
+    do \
+    { \
+        if (!(expr)) \
+        { \
+            printf("error! '%s' failed! (line %d)\n", #expr, __LINE__); \
+            rc++; \
+        } \
+    } while (0)
+
+
+#endif /* !VMM_INCLUDED_SRC_testcase_tstHelp_h */
diff --git a/src/VBox/VMM/testcase/tstIEMCheckMc.cpp b/src/VBox/VMM/testcase/tstIEMCheckMc.cpp
new file mode 100644
index 00000000..71c4faa6
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstIEMCheckMc.cpp
@@ -0,0 +1,769 @@
+/* $Id: tstIEMCheckMc.cpp $ */
+/** @file
+ * IEM Testcase - Check the "Microcode".
+ */
+
+/*
+ * Copyright (C) 2011-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define VMCPU_INCL_CPUM_GST_CTX
+#include <iprt/assert.h>
+#include <iprt/rand.h>
+#include <iprt/test.h>
+
+#include <VBox/types.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+#define IN_TSTVMSTRUCT 1
+#include "../include/IEMInternal.h"
+#include <VBox/vmm/vm.h>
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** Stub value consumed by the IEM_IS_* / IEM_OPCODE_GET_NEXT_* macros below; volatile so the compiler cannot constant-fold it. */
+bool volatile g_fRandom;
+/** Stub byte returned by the opcode-fetch stub macros; volatile for the same reason. */
+uint8_t volatile g_bRandom;
+/** All-zero 128-bit value. */
+RTUINT128U g_u128Zero;
+
+
+/** For hacks. */
+#define TST_IEM_CHECK_MC
+
+/** Compile-time check that @a a_Param is an lvalue of type @a a_ExpectedType (takes its address). */
+#define CHK_TYPE(a_ExpectedType, a_Param) \
+    do { a_ExpectedType const * pCheckType = &(a_Param); NOREF(pCheckType); } while (0)
+/** Compile-time check that @a a_Param converts to @a a_ExpectedType (by value). */
+#define CHK_PTYPE(a_ExpectedType, a_Param) \
+    do { a_ExpectedType pCheckType = (a_Param); NOREF(pCheckType); } while (0)
+
+/** Compile-time check that @a a_Const is a constant expression representable as @a a_ExpectedType. */
+#define CHK_CONST(a_ExpectedType, a_Const) \
+    do { \
+        AssertCompile(((a_Const) >> 1) == ((a_Const) >> 1)); \
+        AssertCompile((a_ExpectedType)(a_Const) == (a_Const)); \
+    } while (0)
+
+/** Compile-time check that @a a_fBitMask is a constant with exactly one bit set. */
+#define CHK_SINGLE_BIT(a_ExpectedType, a_fBitMask) \
+    do { \
+        CHK_CONST(a_ExpectedType, a_fBitMask); \
+        AssertCompile(RT_IS_POWER_OF_TWO(a_fBitMask)); \
+    } while (0)
+
+/** Checks that @a a_EffAddr is an RTGCPTR lvalue. */
+#define CHK_GCPTR(a_EffAddr) \
+    CHK_TYPE(RTGCPTR, a_EffAddr)
+
+/** Checks that @a a_iSeg is assignable to a uint8_t segment index. */
+#define CHK_SEG_IDX(a_iSeg) \
+    do { \
+        uint8_t iMySeg = (a_iSeg); NOREF(iMySeg); /** @todo const or variable. grr. */ \
+    } while (0)
+
+/** Marks microcode call argument @a a_iArg / @a a_Name as used by assigning its tracking variable. */
+#define CHK_CALL_ARG(a_Name, a_iArg) \
+    do { RT_CONCAT3(iArgCheck_,a_iArg,a_Name) = 1; } while (0)
+
+
+/** @name Other stubs.
+ * @{ */
+
+/** Pointer to an opcode handler taking only the cross-context CPU structure. */
+typedef VBOXSTRICTRC (* PFNIEMOP)(PVMCPU pVCpu);
+/** Defines an opcode handler with no extra arguments. */
+#define FNIEMOP_DEF(a_Name) \
+    static VBOXSTRICTRC a_Name(PVMCPU pVCpu) RT_NO_THROW_DEF
+/** Defines an opcode handler with one extra argument. */
+#define FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) \
+    static VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0) RT_NO_THROW_DEF
+/** Defines an opcode handler with two extra arguments. */
+#define FNIEMOP_DEF_2(a_Name, a_Type0, a_Name0, a_Type1, a_Name1) \
+    static VBOXSTRICTRC a_Name(PVMCPU pVCpu, a_Type0 a_Name0, a_Type1 a_Name1) RT_NO_THROW_DEF
+
+/** Pointer to an opcode handler that also receives the ModR/M byte. */
+typedef VBOXSTRICTRC (* PFNIEMOPRM)(PVMCPU pVCpu, uint8_t bRm);
+/** Defines an opcode handler taking the ModR/M byte. */
+#define FNIEMOPRM_DEF(a_Name) \
+    static VBOXSTRICTRC a_Name(PVMCPU pVCpu, uint8_t bRm) RT_NO_THROW_DEF
+
+#define IEM_NOT_REACHED_DEFAULT_CASE_RET() default: return VERR_IPE_NOT_REACHED_DEFAULT_CASE
+#define IEM_RETURN_ASPECT_NOT_IMPLEMENTED() return IEM_RETURN_ASPECT_NOT_IMPLEMENTED
+#define IEM_RETURN_ASPECT_NOT_IMPLEMENTED_LOG(a_LoggerArgs) return IEM_RETURN_ASPECT_NOT_IMPLEMENTED
+
+
+#define IEM_OPCODE_GET_NEXT_RM(a_pu8) do { *(a_pu8) = g_bRandom; CHK_PTYPE(uint8_t *, a_pu8); } while (0)
+#define IEM_OPCODE_GET_NEXT_U8(a_pu8) do { *(a_pu8) = g_bRandom; CHK_PTYPE(uint8_t *, a_pu8); } while (0)
+#define IEM_OPCODE_GET_NEXT_S8(a_pi8) do { *(a_pi8) = g_bRandom; CHK_PTYPE(int8_t *, a_pi8); } while (0)
+#define IEM_OPCODE_GET_NEXT_S8_SX_U16(a_pu16) do { *(a_pu16) = g_bRandom; CHK_PTYPE(uint16_t *, a_pu16); } while (0)
+#define IEM_OPCODE_GET_NEXT_S8_SX_U32(a_pu32) do { *(a_pu32) = g_bRandom; CHK_PTYPE(uint32_t *, a_pu32); } while (0)
+#define IEM_OPCODE_GET_NEXT_S8_SX_U64(a_pu64) do { *(a_pu64) = g_bRandom; CHK_PTYPE(uint64_t *, a_pu64); } while (0)
+#define IEM_OPCODE_GET_NEXT_U16(a_pu16) do { *(a_pu16) = g_bRandom; CHK_PTYPE(uint16_t *, a_pu16); } while (0)
+#define IEM_OPCODE_GET_NEXT_U16_ZX_U32(a_pu32) do { *(a_pu32) = g_bRandom; CHK_PTYPE(uint32_t *, a_pu32); } while (0)
+#define IEM_OPCODE_GET_NEXT_U16_ZX_U64(a_pu64) do { *(a_pu64) = g_bRandom; CHK_PTYPE(uint64_t *, a_pu64); } while (0)
+#define IEM_OPCODE_GET_NEXT_S16(a_pi16) do { *(a_pi16) = g_bRandom; CHK_PTYPE(int16_t *, a_pi16); } while (0)
+#define IEM_OPCODE_GET_NEXT_U32(a_pu32) do { *(a_pu32) = g_bRandom; CHK_PTYPE(uint32_t *, a_pu32); } while (0)
+#define IEM_OPCODE_GET_NEXT_U32_ZX_U64(a_pu64) do { *(a_pu64) = g_bRandom; CHK_PTYPE(uint64_t *, a_pu64); } while (0)
+#define IEM_OPCODE_GET_NEXT_S32(a_pi32) do { *(a_pi32) = g_bRandom; CHK_PTYPE(int32_t *, a_pi32); } while (0)
+#define IEM_OPCODE_GET_NEXT_S32_SX_U64(a_pu64) do { *(a_pu64) = g_bRandom; CHK_PTYPE(uint64_t *, a_pu64); } while (0)
+#define IEM_OPCODE_GET_NEXT_U64(a_pu64) do { *(a_pu64) = g_bRandom; CHK_PTYPE(uint64_t *, a_pu64); } while (0)
+#define IEMOP_HLP_MIN_186() do { } while (0)
+#define IEMOP_HLP_MIN_286() do { } while (0)
+#define IEMOP_HLP_MIN_386() do { } while (0)
+#define IEMOP_HLP_MIN_386_EX(a_fTrue) do { } while (0)
+#define IEMOP_HLP_MIN_486() do { } while (0)
+#define IEMOP_HLP_MIN_586() do { } while (0)
+#define IEMOP_HLP_MIN_686() do { } while (0)
+#define IEMOP_HLP_NO_REAL_OR_V86_MODE() do { } while (0)
+#define IEMOP_HLP_NO_64BIT() do { } while (0)
+#define IEMOP_HLP_ONLY_64BIT() do { } while (0)
+#define IEMOP_HLP_64BIT_OP_SIZE() do { } while (0)
+#define IEMOP_HLP_DEFAULT_64BIT_OP_SIZE() do { } while (0)
+#define IEMOP_HLP_CLEAR_REX_NOT_BEFORE_OPCODE(a_szPrf) do { } while (0)
+#define IEMOP_HLP_DONE_DECODING_NO_LOCK_PREFIX() do { } while (0)
+#define IEMOP_HLP_DONE_VEX_DECODING() do { } while (0)
+#define IEMOP_HLP_DONE_VEX_DECODING_L0() do { } while (0)
+#define IEMOP_HLP_DONE_VEX_DECODING_NO_VVVV() do { } while (0)
+#define IEMOP_HLP_DONE_VEX_DECODING_L0_AND_NO_VVVV() do { } while (0)
+#define IEMOP_HLP_DONE_DECODING_NO_LOCK_REPZ_OR_REPNZ_PREFIXES() do { } while (0)
+#define IEMOP_HLP_DONE_DECODING_NO_SIZE_OP_REPZ_OR_REPNZ_PREFIXES() do { } while (0)
+#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
+# define IEMOP_HLP_VMX_INSTR(a_szInstr, a_InsDiagPrefix) do { } while (0)
+# define IEMOP_HLP_IN_VMX_OPERATION(a_szInstr, a_InsDiagPrefix) do { } while (0)
+#endif
+
+
+#define IEMOP_HLP_DONE_DECODING() do { } while (0)
+
+#define IEMOP_HLP_DECODED_NL_1(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_fDisOpType) do { } while (0)
+#define IEMOP_HLP_DECODED_NL_2(a_uDisOpNo, a_fIemOpFlags, a_uDisParam0, a_uDisParam1, a_fDisOpType) do { } while (0)
+#define IEMOP_RAISE_DIVIDE_ERROR() VERR_TRPM_ACTIVE_TRAP
+#define IEMOP_RAISE_INVALID_OPCODE() VERR_TRPM_ACTIVE_TRAP
+#define IEMOP_RAISE_INVALID_LOCK_PREFIX() VERR_TRPM_ACTIVE_TRAP
+#define IEMOP_MNEMONIC(a_Stats, a_szMnemonic) do { } while (0)
+#define IEMOP_MNEMONIC0EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) do { } while (0)
+#define IEMOP_MNEMONIC1EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) do { } while (0)
+#define IEMOP_MNEMONIC2EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) do { } while (0)
+#define IEMOP_MNEMONIC3EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) do { } while (0)
+#define IEMOP_MNEMONIC4EX(a_Stats, a_szMnemonic, a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_Op4, a_fDisHints, a_fIemHints) do { } while (0)
+#define IEMOP_MNEMONIC0(a_Form, a_Upper, a_Lower, a_fDisHints, a_fIemHints) do { } while (0)
+#define IEMOP_MNEMONIC1(a_Form, a_Upper, a_Lower, a_Op1, a_fDisHints, a_fIemHints) do { } while (0)
+#define IEMOP_MNEMONIC2(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_fDisHints, a_fIemHints) do { } while (0)
+#define IEMOP_MNEMONIC3(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) do { } while (0)
+#define IEMOP_MNEMONIC4(a_Form, a_Upper, a_Lower, a_Op1, a_Op2, a_Op3, a_fDisHints, a_fIemHints) do { } while (0)
+#define IEMOP_BITCH_ABOUT_STUB() do { } while (0)
+#define FNIEMOP_STUB(a_Name) \
+ FNIEMOP_DEF(a_Name) { return VERR_NOT_IMPLEMENTED; } \
+ typedef int ignore_semicolon
+#define FNIEMOP_STUB_1(a_Name, a_Type0, a_Name0) \
+ FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) { return VERR_NOT_IMPLEMENTED; } \
+ typedef int ignore_semicolon
+
+#define FNIEMOP_UD_STUB(a_Name) \
+ FNIEMOP_DEF(a_Name) { return IEMOP_RAISE_INVALID_OPCODE(); } \
+ typedef int ignore_semicolon
+#define FNIEMOP_UD_STUB_1(a_Name, a_Type0, a_Name0) \
+ FNIEMOP_DEF_1(a_Name, a_Type0, a_Name0) { return IEMOP_RAISE_INVALID_OPCODE(); } \
+ typedef int ignore_semicolon
+
+
+#define FNIEMOP_CALL(a_pfn) (a_pfn)(pVCpu)
+#define FNIEMOP_CALL_1(a_pfn, a0) (a_pfn)(pVCpu, a0)
+#define FNIEMOP_CALL_2(a_pfn, a0, a1) (a_pfn)(pVCpu, a0, a1)
+
+#define IEM_IS_REAL_OR_V86_MODE(a_pVCpu) (g_fRandom)
+#define IEM_IS_LONG_MODE(a_pVCpu) (g_fRandom)
+#define IEM_IS_REAL_MODE(a_pVCpu) (g_fRandom)
+#define IEM_IS_GUEST_CPU_AMD(a_pVCpu) (g_fRandom)
+#define IEM_IS_GUEST_CPU_INTEL(a_pVCpu) (g_fRandom)
+#define IEM_GET_GUEST_CPU_FEATURES(a_pVCpu) ((PCCPUMFEATURES)(uintptr_t)42)
+#define IEM_GET_HOST_CPU_FEATURES(a_pVCpu) ((PCCPUMFEATURES)(uintptr_t)88)
+
+#define iemRecalEffOpSize(a_pVCpu) do { } while (0)
+
+IEMOPBINSIZES g_iemAImpl_add;
+IEMOPBINSIZES g_iemAImpl_adc;
+IEMOPBINSIZES g_iemAImpl_sub;
+IEMOPBINSIZES g_iemAImpl_sbb;
+IEMOPBINSIZES g_iemAImpl_or;
+IEMOPBINSIZES g_iemAImpl_xor;
+IEMOPBINSIZES g_iemAImpl_and;
+IEMOPBINSIZES g_iemAImpl_cmp;
+IEMOPBINSIZES g_iemAImpl_test;
+IEMOPBINSIZES g_iemAImpl_bt;
+IEMOPBINSIZES g_iemAImpl_btc;
+IEMOPBINSIZES g_iemAImpl_btr;
+IEMOPBINSIZES g_iemAImpl_bts;
+IEMOPBINSIZES g_iemAImpl_bsf;
+IEMOPBINSIZES g_iemAImpl_bsr;
+IEMOPBINSIZES g_iemAImpl_imul_two;
+PCIEMOPBINSIZES g_apIemImplGrp1[8];
+IEMOPUNARYSIZES g_iemAImpl_inc;
+IEMOPUNARYSIZES g_iemAImpl_dec;
+IEMOPUNARYSIZES g_iemAImpl_neg;
+IEMOPUNARYSIZES g_iemAImpl_not;
+IEMOPSHIFTSIZES g_iemAImpl_rol;
+IEMOPSHIFTSIZES g_iemAImpl_ror;
+IEMOPSHIFTSIZES g_iemAImpl_rcl;
+IEMOPSHIFTSIZES g_iemAImpl_rcr;
+IEMOPSHIFTSIZES g_iemAImpl_shl;
+IEMOPSHIFTSIZES g_iemAImpl_shr;
+IEMOPSHIFTSIZES g_iemAImpl_sar;
+IEMOPMULDIVSIZES g_iemAImpl_mul;
+IEMOPMULDIVSIZES g_iemAImpl_imul;
+IEMOPMULDIVSIZES g_iemAImpl_div;
+IEMOPMULDIVSIZES g_iemAImpl_idiv;
+IEMOPSHIFTDBLSIZES g_iemAImpl_shld;
+IEMOPSHIFTDBLSIZES g_iemAImpl_shrd;
+IEMOPMEDIAF1L1 g_iemAImpl_punpcklbw;
+IEMOPMEDIAF1L1 g_iemAImpl_punpcklwd;
+IEMOPMEDIAF1L1 g_iemAImpl_punpckldq;
+IEMOPMEDIAF1L1 g_iemAImpl_punpcklqdq;
+IEMOPMEDIAF1H1 g_iemAImpl_punpckhbw;
+IEMOPMEDIAF1H1 g_iemAImpl_punpckhwd;
+IEMOPMEDIAF1H1 g_iemAImpl_punpckhdq;
+IEMOPMEDIAF1H1 g_iemAImpl_punpckhqdq;
+IEMOPMEDIAF2 g_iemAImpl_pxor;
+IEMOPMEDIAF2 g_iemAImpl_pcmpeqb;
+IEMOPMEDIAF2 g_iemAImpl_pcmpeqw;
+IEMOPMEDIAF2 g_iemAImpl_pcmpeqd;
+
+
+#define iemAImpl_idiv_u8 ((PFNIEMAIMPLMULDIVU8)0)
+#define iemAImpl_div_u8 ((PFNIEMAIMPLMULDIVU8)0)
+#define iemAImpl_imul_u8 ((PFNIEMAIMPLMULDIVU8)0)
+#define iemAImpl_mul_u8 ((PFNIEMAIMPLMULDIVU8)0)
+
+#define iemAImpl_fpu_r32_to_r80 NULL
+#define iemAImpl_fcom_r80_by_r32 NULL
+#define iemAImpl_fadd_r80_by_r32 NULL
+#define iemAImpl_fmul_r80_by_r32 NULL
+#define iemAImpl_fsub_r80_by_r32 NULL
+#define iemAImpl_fsubr_r80_by_r32 NULL
+#define iemAImpl_fdiv_r80_by_r32 NULL
+#define iemAImpl_fdivr_r80_by_r32 NULL
+
+/* Stub the iemAImpl_* / iemCImpl_* worker symbols out as NULL pointers; the
+ * checker only needs the names to exist, not working implementations. */
+#define iemAImpl_fpu_r64_to_r80 NULL
+#define iemAImpl_fadd_r80_by_r64 NULL
+#define iemAImpl_fmul_r80_by_r64 NULL
+#define iemAImpl_fcom_r80_by_r64 NULL
+#define iemAImpl_fsub_r80_by_r64 NULL
+#define iemAImpl_fsubr_r80_by_r64 NULL
+#define iemAImpl_fdiv_r80_by_r64 NULL
+#define iemAImpl_fdivr_r80_by_r64 NULL
+
+#define iemAImpl_fadd_r80_by_r80 NULL
+#define iemAImpl_fmul_r80_by_r80 NULL
+#define iemAImpl_fsub_r80_by_r80 NULL
+#define iemAImpl_fsubr_r80_by_r80 NULL
+#define iemAImpl_fdiv_r80_by_r80 NULL
+#define iemAImpl_fdivr_r80_by_r80 NULL
+#define iemAImpl_fprem_r80_by_r80 NULL
+#define iemAImpl_fprem1_r80_by_r80 NULL
+#define iemAImpl_fscale_r80_by_r80 NULL
+
+#define iemAImpl_fpatan_r80_by_r80 NULL
+#define iemAImpl_fyl2x_r80_by_r80 NULL
+#define iemAImpl_fyl2xp1_r80_by_r80 NULL
+
+#define iemAImpl_fcom_r80_by_r80 NULL
+#define iemAImpl_fucom_r80_by_r80 NULL
+#define iemAImpl_fabs_r80 NULL
+#define iemAImpl_fchs_r80 NULL
+#define iemAImpl_ftst_r80 NULL
+#define iemAImpl_fxam_r80 NULL
+#define iemAImpl_f2xm1_r80 NULL
+#define iemAImpl_fsqrt_r80 NULL
+#define iemAImpl_frndint_r80 NULL
+#define iemAImpl_fsin_r80 NULL
+#define iemAImpl_fcos_r80 NULL
+
+/* FPU load-constant workers. */
+#define iemAImpl_fld1 NULL
+#define iemAImpl_fldl2t NULL
+#define iemAImpl_fldl2e NULL
+#define iemAImpl_fldpi NULL
+#define iemAImpl_fldlg2 NULL
+#define iemAImpl_fldln2 NULL
+#define iemAImpl_fldz NULL
+
+#define iemAImpl_fptan_r80_r80 NULL
+#define iemAImpl_fxtract_r80_r80 NULL
+#define iemAImpl_fsincos_r80_r80 NULL
+
+/* FPU integer-operand workers (16-bit and 32-bit). */
+#define iemAImpl_fiadd_r80_by_i16 NULL
+#define iemAImpl_fimul_r80_by_i16 NULL
+#define iemAImpl_fisub_r80_by_i16 NULL
+#define iemAImpl_fisubr_r80_by_i16 NULL
+#define iemAImpl_fidiv_r80_by_i16 NULL
+#define iemAImpl_fidivr_r80_by_i16 NULL
+
+#define iemAImpl_fiadd_r80_by_i32 NULL
+#define iemAImpl_fimul_r80_by_i32 NULL
+#define iemAImpl_fisub_r80_by_i32 NULL
+#define iemAImpl_fisubr_r80_by_i32 NULL
+#define iemAImpl_fidiv_r80_by_i32 NULL
+#define iemAImpl_fidivr_r80_by_i32 NULL
+
+/* C-implementation workers for far control transfers. */
+#define iemCImpl_callf NULL
+#define iemCImpl_FarJmp NULL
+
+/* SSE shuffle workers. */
+#define iemAImpl_pshufhw NULL
+#define iemAImpl_pshuflw NULL
+#define iemAImpl_pshufd NULL
+
+/** @} */
+
+
+/* IEM_REPEAT(n, Cb, User) expands Cb##_CALLBACK(i, User) once for each
+ * i = 0..n-1; n must be a literal digit 0..9 so RT_CONCAT can select the
+ * matching IEM_REPEAT_<n> variant. */
+#define IEM_REPEAT_0(a_Callback, a_User) do { } while (0)
+#define IEM_REPEAT_1(a_Callback, a_User) a_Callback##_CALLBACK(0, a_User)
+#define IEM_REPEAT_2(a_Callback, a_User) IEM_REPEAT_1(a_Callback, a_User); a_Callback##_CALLBACK(1, a_User)
+#define IEM_REPEAT_3(a_Callback, a_User) IEM_REPEAT_2(a_Callback, a_User); a_Callback##_CALLBACK(2, a_User)
+#define IEM_REPEAT_4(a_Callback, a_User) IEM_REPEAT_3(a_Callback, a_User); a_Callback##_CALLBACK(3, a_User)
+#define IEM_REPEAT_5(a_Callback, a_User) IEM_REPEAT_4(a_Callback, a_User); a_Callback##_CALLBACK(4, a_User)
+#define IEM_REPEAT_6(a_Callback, a_User) IEM_REPEAT_5(a_Callback, a_User); a_Callback##_CALLBACK(5, a_User)
+#define IEM_REPEAT_7(a_Callback, a_User) IEM_REPEAT_6(a_Callback, a_User); a_Callback##_CALLBACK(6, a_User)
+#define IEM_REPEAT_8(a_Callback, a_User) IEM_REPEAT_7(a_Callback, a_User); a_Callback##_CALLBACK(7, a_User)
+#define IEM_REPEAT_9(a_Callback, a_User) IEM_REPEAT_8(a_Callback, a_User); a_Callback##_CALLBACK(8, a_User)
+#define IEM_REPEAT(a_cTimes, a_Callback, a_User) RT_CONCAT(IEM_REPEAT_,a_cTimes)(a_Callback, a_User)
+
+
+
+/** @name Microcode test stubs
+ * @{ */
+
+/* Declares (and immediately marks as referenced) one int iArgCheck_<i> per
+ * expected argument; IEM_MC_ARG* later assign to these to prove each index
+ * is actually used. */
+#define IEM_ARG_CHECK_CALLBACK(a_idx, a_User) int RT_CONCAT(iArgCheck_,a_idx); NOREF(RT_CONCAT(iArgCheck_,a_idx))
+/* Opens a compound statement (the unbalanced '{' is closed by IEM_MC_END),
+ * declares cArgs/cLocals/fMcBegin for later compile-time checks, and expands
+ * IEM_ARG_CHECK for every declared argument index. */
+#define IEM_MC_BEGIN(a_cArgs, a_cLocals) \
+ { \
+ const uint8_t cArgs = (a_cArgs); NOREF(cArgs); \
+ const uint8_t cLocals = (a_cLocals); NOREF(cLocals); \
+ const uint8_t fMcBegin = (a_cArgs) + (a_cLocals); \
+ IEM_REPEAT(a_cArgs, IEM_ARG_CHECK, 0); \
+
+/* Closes the scope opened by IEM_MC_BEGIN. */
+#define IEM_MC_END() \
+ }
+
+/* Control-flow and exception-raising stubs.  Each references fMcBegin so the
+ * statement only compiles inside an IEM_MC_BEGIN/IEM_MC_END pair; CHK_TYPE
+ * enforces the C type of the operand where one is taken. */
+#define IEM_MC_PAUSE() do { (void)fMcBegin; } while (0)
+#define IEM_MC_CONTINUE() do { (void)fMcBegin; } while (0)
+#define IEM_MC_ADVANCE_RIP() do { (void)fMcBegin; } while (0)
+#define IEM_MC_REL_JMP_S8(a_i8) do { (void)fMcBegin; CHK_TYPE(int8_t, a_i8); } while (0)
+#define IEM_MC_REL_JMP_S16(a_i16) do { (void)fMcBegin; CHK_TYPE(int16_t, a_i16); } while (0)
+#define IEM_MC_REL_JMP_S32(a_i32) do { (void)fMcBegin; CHK_TYPE(int32_t, a_i32); } while (0)
+#define IEM_MC_SET_RIP_U16(a_u16NewIP) do { (void)fMcBegin; CHK_TYPE(uint16_t, a_u16NewIP); } while (0)
+#define IEM_MC_SET_RIP_U32(a_u32NewIP) do { (void)fMcBegin; CHK_TYPE(uint32_t, a_u32NewIP); } while (0)
+#define IEM_MC_SET_RIP_U64(a_u64NewIP) do { (void)fMcBegin; CHK_TYPE(uint64_t, a_u64NewIP); } while (0)
+/* Returns from the enclosing function, so anything after this stub in the
+ * expansion is dead code. */
+#define IEM_MC_RAISE_DIVIDE_ERROR() do { (void)fMcBegin; return VERR_TRPM_ACTIVE_TRAP; } while (0)
+#define IEM_MC_MAYBE_RAISE_DEVICE_NOT_AVAILABLE() do { (void)fMcBegin; } while (0)
+#define IEM_MC_MAYBE_RAISE_WAIT_DEVICE_NOT_AVAILABLE() do { (void)fMcBegin; } while (0)
+#define IEM_MC_MAYBE_RAISE_FPU_XCPT() do { (void)fMcBegin; } while (0)
+#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT() do { (void)fMcBegin; } while (0)
+#define IEM_MC_MAYBE_RAISE_MMX_RELATED_XCPT_CHECK_SSE_OR_MMXEXT() do { (void)fMcBegin; } while (0)
+#define IEM_MC_MAYBE_RAISE_SSE_RELATED_XCPT() do { (void)fMcBegin; } while (0)
+#define IEM_MC_MAYBE_RAISE_SSE2_RELATED_XCPT() do { (void)fMcBegin; } while (0)
+#define IEM_MC_MAYBE_RAISE_SSE3_RELATED_XCPT() do { (void)fMcBegin; } while (0)
+#define IEM_MC_MAYBE_RAISE_SSE41_RELATED_XCPT() do { (void)fMcBegin; } while (0)
+#define IEM_MC_MAYBE_RAISE_AVX_RELATED_XCPT() do { (void)fMcBegin; } while (0)
+#define IEM_MC_MAYBE_RAISE_AVX2_RELATED_XCPT() do { (void)fMcBegin; } while (0)
+#define IEM_MC_RAISE_GP0_IF_CPL_NOT_ZERO() do { (void)fMcBegin; } while (0)
+/* Alignment must be a compile-time power of two. */
+#define IEM_MC_RAISE_GP0_IF_EFF_ADDR_UNALIGNED(a_EffAddr, a_cbAlign) \
+ do { (void)fMcBegin; AssertCompile(RT_IS_POWER_OF_TWO(a_cbAlign)); CHK_TYPE(RTGCPTR, a_EffAddr); } while (0)
+#define IEM_MC_MAYBE_RAISE_FSGSBASE_XCPT() do { (void)fMcBegin; } while (0)
+#define IEM_MC_MAYBE_RAISE_NON_CANONICAL_ADDR_GP0(a_u64Addr) do { (void)fMcBegin; } while (0)
+
+/* Local-variable and argument declaration stubs.  The IEM_MC_ARG* variants
+ * assign to iArgCheck_<i> (declared by IEM_MC_BEGIN), so an out-of-range
+ * argument index fails to compile, and AssertCompile checks a_iArg < cArgs.
+ * The extra int iArgCheck_<i><name> declaration makes a duplicate
+ * (index, name) pair a redefinition error. */
+#define IEM_MC_LOCAL(a_Type, a_Name) (void)fMcBegin; \
+ a_Type a_Name; NOREF(a_Name); (void)fMcBegin
+#define IEM_MC_LOCAL_CONST(a_Type, a_Name, a_Value) (void)fMcBegin; \
+ a_Type const a_Name = (a_Value); \
+ NOREF(a_Name)
+#define IEM_MC_REF_LOCAL(a_pRefArg, a_Local) (void)fMcBegin; \
+ (a_pRefArg) = &(a_Local)
+
+#define IEM_MC_ARG(a_Type, a_Name, a_iArg) (void)fMcBegin; \
+ RT_CONCAT(iArgCheck_,a_iArg) = 1; NOREF(RT_CONCAT(iArgCheck_,a_iArg)); \
+ int RT_CONCAT3(iArgCheck_,a_iArg,a_Name); NOREF(RT_CONCAT3(iArgCheck_,a_iArg,a_Name)); \
+ AssertCompile((a_iArg) < cArgs); \
+ a_Type a_Name; \
+ NOREF(a_Name)
+#define IEM_MC_ARG_CONST(a_Type, a_Name, a_Value, a_iArg) (void)fMcBegin; \
+ RT_CONCAT(iArgCheck_, a_iArg) = 1; NOREF(RT_CONCAT(iArgCheck_,a_iArg)); \
+ int RT_CONCAT3(iArgCheck_,a_iArg,a_Name); NOREF(RT_CONCAT3(iArgCheck_,a_iArg,a_Name)); \
+ AssertCompile((a_iArg) < cArgs); \
+ a_Type const a_Name = (a_Value); \
+ NOREF(a_Name)
+#define IEM_MC_ARG_XSTATE(a_Name, a_iArg) \
+ IEM_MC_ARG_CONST(PX86XSAVEAREA, a_Name, NULL, a_iArg)
+
+#define IEM_MC_ARG_LOCAL_REF(a_Type, a_Name, a_Local, a_iArg) (void)fMcBegin; \
+ RT_CONCAT(iArgCheck_, a_iArg) = 1; NOREF(RT_CONCAT(iArgCheck_,a_iArg)); \
+ int RT_CONCAT3(iArgCheck_,a_iArg,a_Name); NOREF(RT_CONCAT3(iArgCheck_,a_iArg,a_Name)); \
+ AssertCompile((a_iArg) < cArgs); \
+ a_Type const a_Name = &(a_Local); \
+ NOREF(a_Name)
+/* Declares both the uint32_t local and the pointer argument referring to it. */
+#define IEM_MC_ARG_LOCAL_EFLAGS(a_pName, a_Name, a_iArg) (void)fMcBegin; \
+ RT_CONCAT(iArgCheck_, a_iArg) = 1; NOREF(RT_CONCAT(iArgCheck_,a_iArg)); \
+ int RT_CONCAT3(iArgCheck_,a_iArg,a_pName); NOREF(RT_CONCAT3(iArgCheck_,a_iArg,a_pName)); \
+ AssertCompile((a_iArg) < cArgs); \
+ uint32_t a_Name; \
+ uint32_t *a_pName = &a_Name; \
+ NOREF(a_pName)
+
+/* Register / flags fetch, store and reference stubs.  Fetches assign 0 so the
+ * destination must be assignable; CHK_TYPE / CHK_PTYPE / CHK_CONST (defined
+ * earlier in this file) enforce the operand's exact C type.  REF_* stubs hand
+ * back a null pointer of the checked type. */
+#define IEM_MC_COMMIT_EFLAGS(a_EFlags) do { CHK_TYPE(uint32_t, a_EFlags); (void)fMcBegin; } while (0)
+#define IEM_MC_ASSIGN(a_VarOrArg, a_CVariableOrConst) do { (a_VarOrArg) = (0); (void)fMcBegin; } while (0)
+#define IEM_MC_ASSIGN_TO_SMALLER IEM_MC_ASSIGN
+
+#define IEM_MC_FETCH_GREG_U8(a_u8Dst, a_iGReg) do { (a_u8Dst) = 0; CHK_TYPE(uint8_t, a_u8Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_GREG_U8_ZX_U16(a_u16Dst, a_iGReg) do { (a_u16Dst) = 0; CHK_TYPE(uint16_t, a_u16Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_GREG_U8_ZX_U32(a_u32Dst, a_iGReg) do { (a_u32Dst) = 0; CHK_TYPE(uint32_t, a_u32Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_GREG_U8_ZX_U64(a_u64Dst, a_iGReg) do { (a_u64Dst) = 0; CHK_TYPE(uint64_t, a_u64Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_GREG_U8_SX_U16(a_u16Dst, a_iGReg) do { (a_u16Dst) = 0; CHK_TYPE(uint16_t, a_u16Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_GREG_U8_SX_U32(a_u32Dst, a_iGReg) do { (a_u32Dst) = 0; CHK_TYPE(uint32_t, a_u32Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_GREG_U8_SX_U64(a_u64Dst, a_iGReg) do { (a_u64Dst) = 0; CHK_TYPE(uint64_t, a_u64Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_GREG_U16(a_u16Dst, a_iGReg) do { (a_u16Dst) = 0; CHK_TYPE(uint16_t, a_u16Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_GREG_U16_ZX_U32(a_u32Dst, a_iGReg) do { (a_u32Dst) = 0; CHK_TYPE(uint32_t, a_u32Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_GREG_U16_ZX_U64(a_u64Dst, a_iGReg) do { (a_u64Dst) = 0; CHK_TYPE(uint64_t, a_u64Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_GREG_U16_SX_U32(a_u32Dst, a_iGReg) do { (a_u32Dst) = 0; CHK_TYPE(uint32_t, a_u32Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_GREG_U16_SX_U64(a_u64Dst, a_iGReg) do { (a_u64Dst) = 0; CHK_TYPE(uint64_t, a_u64Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_GREG_U32(a_u32Dst, a_iGReg) do { (a_u32Dst) = 0; CHK_TYPE(uint32_t, a_u32Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_GREG_U32_ZX_U64(a_u64Dst, a_iGReg) do { (a_u64Dst) = 0; CHK_TYPE(uint64_t, a_u64Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_GREG_U32_SX_U64(a_u64Dst, a_iGReg) do { (a_u64Dst) = 0; CHK_TYPE(uint64_t, a_u64Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_GREG_U64(a_u64Dst, a_iGReg) do { (a_u64Dst) = 0; CHK_TYPE(uint64_t, a_u64Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_GREG_U64_ZX_U64 IEM_MC_FETCH_GREG_U64
+#define IEM_MC_FETCH_SREG_U16(a_u16Dst, a_iSReg) do { (a_u16Dst) = 0; CHK_TYPE(uint16_t, a_u16Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_SREG_ZX_U32(a_u32Dst, a_iSReg) do { (a_u32Dst) = 0; CHK_TYPE(uint32_t, a_u32Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_SREG_ZX_U64(a_u64Dst, a_iSReg) do { (a_u64Dst) = 0; CHK_TYPE(uint64_t, a_u64Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_SREG_BASE_U64(a_u64Dst, a_iSReg) do { (a_u64Dst) = 0; CHK_TYPE(uint64_t, a_u64Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_SREG_BASE_U32(a_u32Dst, a_iSReg) do { (a_u32Dst) = 0; CHK_TYPE(uint32_t, a_u32Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_EFLAGS(a_EFlags) do { (a_EFlags) = 0; CHK_TYPE(uint32_t, a_EFlags); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_EFLAGS_U8(a_EFlags) do { (a_EFlags) = 0; CHK_TYPE(uint8_t, a_EFlags); (void)fMcBegin; } while (0)
+/* FSW/FCW fetches also require the fFpuRead flag to be in scope. */
+#define IEM_MC_FETCH_FSW(a_u16Fsw) do { (a_u16Fsw) = 0; CHK_TYPE(uint16_t, a_u16Fsw); (void)fFpuRead; (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_FCW(a_u16Fcw) do { (a_u16Fcw) = 0; CHK_TYPE(uint16_t, a_u16Fcw); (void)fFpuRead; (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_GREG_U8(a_iGReg, a_u8Value) do { CHK_TYPE(uint8_t, a_u8Value); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_GREG_U16(a_iGReg, a_u16Value) do { CHK_TYPE(uint16_t, a_u16Value); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_GREG_U32(a_iGReg, a_u32Value) do { (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_GREG_U64(a_iGReg, a_u64Value) do { (void)fMcBegin; } while (0)
+/* The *_CONST stores verify the constant fits the target width without
+ * truncation. */
+#define IEM_MC_STORE_GREG_U8_CONST(a_iGReg, a_u8C) do { AssertCompile((uint8_t )(a_u8C) == (a_u8C) ); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_GREG_U16_CONST(a_iGReg, a_u16C) do { AssertCompile((uint16_t)(a_u16C) == (a_u16C)); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_GREG_U32_CONST(a_iGReg, a_u32C) do { AssertCompile((uint32_t)(a_u32C) == (a_u32C)); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_GREG_U64_CONST(a_iGReg, a_u64C) do { AssertCompile((uint64_t)(a_u64C) == (a_u64C)); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_FPUREG_R80_SRC_REF(a_iSt, a_pr80Src) do { CHK_PTYPE(PCRTFLOAT80U, a_pr80Src); Assert((a_iSt) < 8); (void)fMcBegin; } while (0)
+#define IEM_MC_CLEAR_HIGH_GREG_U64(a_iGReg) do { (void)fMcBegin; } while (0)
+#define IEM_MC_CLEAR_HIGH_GREG_U64_BY_REF(a_pu32Dst) do { CHK_PTYPE(uint32_t *, a_pu32Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_SREG_BASE_U64(a_iSeg, a_u64Value) do { (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_SREG_BASE_U32(a_iSeg, a_u32Value) do { (void)fMcBegin; } while (0)
+#define IEM_MC_REF_GREG_U8(a_pu8Dst, a_iGReg) do { (a_pu8Dst) = (uint8_t *)((uintptr_t)0); CHK_PTYPE(uint8_t *, a_pu8Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_REF_GREG_U16(a_pu16Dst, a_iGReg) do { (a_pu16Dst) = (uint16_t *)((uintptr_t)0); CHK_PTYPE(uint16_t *, a_pu16Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_REF_GREG_U32(a_pu32Dst, a_iGReg) do { (a_pu32Dst) = (uint32_t *)((uintptr_t)0); CHK_PTYPE(uint32_t *, a_pu32Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_REF_GREG_U64(a_pu64Dst, a_iGReg) do { (a_pu64Dst) = (uint64_t *)((uintptr_t)0); CHK_PTYPE(uint64_t *, a_pu64Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_REF_EFLAGS(a_pEFlags) do { (a_pEFlags) = (uint32_t *)((uintptr_t)0); CHK_PTYPE(uint32_t *, a_pEFlags); (void)fMcBegin; } while (0)
+
+/* General-register add/sub/and/or stubs; CHK_CONST (presumably a
+ * compile-time constant/type check — see its definition earlier in the file)
+ * validates the value operand. */
+#define IEM_MC_ADD_GREG_U8(a_iGReg, a_u8Value) do { CHK_CONST(uint8_t, a_u8Value); (void)fMcBegin; } while (0)
+#define IEM_MC_ADD_GREG_U16(a_iGReg, a_u16Value) do { CHK_CONST(uint16_t, a_u16Value); (void)fMcBegin; } while (0)
+#define IEM_MC_ADD_GREG_U32(a_iGReg, a_u32Value) do { CHK_CONST(uint32_t, a_u32Value); (void)fMcBegin; } while (0)
+#define IEM_MC_ADD_GREG_U64(a_iGReg, a_u64Value) do { CHK_CONST(uint64_t, a_u64Value); (void)fMcBegin; } while (0)
+#define IEM_MC_SUB_GREG_U8(a_iGReg, a_u8Value) do { CHK_CONST(uint8_t, a_u8Value); (void)fMcBegin; } while (0)
+#define IEM_MC_SUB_GREG_U16(a_iGReg, a_u16Value) do { CHK_CONST(uint16_t, a_u16Value); (void)fMcBegin; } while (0)
+#define IEM_MC_SUB_GREG_U32(a_iGReg, a_u32Value) do { CHK_CONST(uint32_t, a_u32Value); (void)fMcBegin; } while (0)
+#define IEM_MC_SUB_GREG_U64(a_iGReg, a_u64Value) do { CHK_CONST(uint64_t, a_u64Value); (void)fMcBegin; } while (0)
+#define IEM_MC_SUB_LOCAL_U16(a_u16Value, a_u16Const) do { CHK_CONST(uint16_t, a_u16Const); (void)fMcBegin; } while (0)
+
+#define IEM_MC_AND_GREG_U8(a_iGReg, a_u8Value) do { CHK_CONST(uint8_t, a_u8Value); (void)fMcBegin; } while (0)
+#define IEM_MC_AND_GREG_U16(a_iGReg, a_u16Value) do { CHK_CONST(uint16_t, a_u16Value); (void)fMcBegin; } while (0)
+#define IEM_MC_AND_GREG_U32(a_iGReg, a_u32Value) do { CHK_CONST(uint32_t, a_u32Value); (void)fMcBegin; } while (0)
+#define IEM_MC_AND_GREG_U64(a_iGReg, a_u64Value) do { CHK_CONST(uint64_t, a_u64Value); (void)fMcBegin; } while (0)
+#define IEM_MC_OR_GREG_U8(a_iGReg, a_u8Value) do { CHK_CONST(uint8_t, a_u8Value); (void)fMcBegin; } while (0)
+#define IEM_MC_OR_GREG_U16(a_iGReg, a_u16Value) do { CHK_CONST(uint16_t, a_u16Value); (void)fMcBegin; } while (0)
+#define IEM_MC_OR_GREG_U32(a_iGReg, a_u32Value) do { CHK_CONST(uint32_t, a_u32Value); (void)fMcBegin; } while (0)
+#define IEM_MC_OR_GREG_U64(a_iGReg, a_u64Value) do { CHK_CONST(uint64_t, a_u64Value); (void)fMcBegin; } while (0)
+
+/* Adds a general register's 8-bit value to a local uint8_t variable (stub).
+ * Fix: the parameter was misnamed a_u16Value while the body expands
+ * a_u8Value, so the macro body referenced an identifier that argument
+ * substitution never supplied — any expansion hit an undeclared name.
+ * Renamed to a_u8Value to match the body and the U16/U32/U64 siblings. */
+#define IEM_MC_ADD_GREG_U8_TO_LOCAL(a_u8Value, a_iGReg) do { (a_u8Value) += 1; CHK_TYPE(uint8_t, a_u8Value); (void)fMcBegin; } while (0)
+/* Local-variable arithmetic, shift and EFLAGS-bit stubs.  The mutating forms
+ * actually perform the operation on the local so it must be an lvalue of the
+ * checked type; CHK_SINGLE_BIT (defined earlier in the file) constrains the
+ * EFLAGS mask operands. */
+#define IEM_MC_ADD_GREG_U16_TO_LOCAL(a_u16Value, a_iGReg) do { (a_u16Value) += 1; CHK_TYPE(uint16_t, a_u16Value); (void)fMcBegin; } while (0)
+#define IEM_MC_ADD_GREG_U32_TO_LOCAL(a_u32Value, a_iGReg) do { (a_u32Value) += 1; CHK_TYPE(uint32_t, a_u32Value); (void)fMcBegin; } while (0)
+#define IEM_MC_ADD_GREG_U64_TO_LOCAL(a_u64Value, a_iGReg) do { (a_u64Value) += 1; CHK_TYPE(uint64_t, a_u64Value); (void)fMcBegin; } while (0)
+#define IEM_MC_ADD_LOCAL_S16_TO_EFF_ADDR(a_EffAddr, a_i16) do { (a_EffAddr) += (a_i16); CHK_GCPTR(a_EffAddr); (void)fMcBegin; } while (0)
+#define IEM_MC_ADD_LOCAL_S32_TO_EFF_ADDR(a_EffAddr, a_i32) do { (a_EffAddr) += (a_i32); CHK_GCPTR(a_EffAddr); (void)fMcBegin; } while (0)
+#define IEM_MC_ADD_LOCAL_S64_TO_EFF_ADDR(a_EffAddr, a_i64) do { (a_EffAddr) += (a_i64); CHK_GCPTR(a_EffAddr); (void)fMcBegin; } while (0)
+#define IEM_MC_AND_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) &= (a_u8Mask); CHK_TYPE(uint8_t, a_u8Local); CHK_CONST(uint8_t, a_u8Mask); (void)fMcBegin; } while (0)
+#define IEM_MC_AND_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) &= (a_u16Mask); CHK_TYPE(uint16_t, a_u16Local); CHK_CONST(uint16_t, a_u16Mask); (void)fMcBegin; } while (0)
+#define IEM_MC_AND_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); CHK_TYPE(uint32_t, a_u32Local); CHK_CONST(uint32_t, a_u32Mask); (void)fMcBegin; } while (0)
+#define IEM_MC_AND_LOCAL_U64(a_u64Local, a_u64Mask) do { (a_u64Local) &= (a_u64Mask); CHK_TYPE(uint64_t, a_u64Local); CHK_CONST(uint64_t, a_u64Mask); (void)fMcBegin; } while (0)
+#define IEM_MC_AND_ARG_U16(a_u16Arg, a_u16Mask) do { (a_u16Arg) &= (a_u16Mask); CHK_TYPE(uint16_t, a_u16Arg); CHK_CONST(uint16_t, a_u16Mask); (void)fMcBegin; } while (0)
+#define IEM_MC_AND_ARG_U32(a_u32Arg, a_u32Mask) do { (a_u32Arg) &= (a_u32Mask); CHK_TYPE(uint32_t, a_u32Arg); CHK_CONST(uint32_t, a_u32Mask); (void)fMcBegin; } while (0)
+#define IEM_MC_AND_ARG_U64(a_u64Arg, a_u64Mask) do { (a_u64Arg) &= (a_u64Mask); CHK_TYPE(uint64_t, a_u64Arg); CHK_CONST(uint64_t, a_u64Mask); (void)fMcBegin; } while (0)
+#define IEM_MC_OR_LOCAL_U8(a_u8Local, a_u8Mask) do { (a_u8Local) |= (a_u8Mask); CHK_TYPE(uint8_t, a_u8Local); CHK_CONST(uint8_t, a_u8Mask); (void)fMcBegin; } while (0)
+#define IEM_MC_OR_LOCAL_U16(a_u16Local, a_u16Mask) do { (a_u16Local) |= (a_u16Mask); CHK_TYPE(uint16_t, a_u16Local); CHK_CONST(uint16_t, a_u16Mask); (void)fMcBegin; } while (0)
+#define IEM_MC_OR_LOCAL_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); CHK_TYPE(uint32_t, a_u32Local); CHK_CONST(uint32_t, a_u32Mask); (void)fMcBegin; } while (0)
+#define IEM_MC_SAR_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) >>= (a_cShift); CHK_TYPE(int16_t, a_i16Local); CHK_CONST(uint8_t, a_cShift); (void)fMcBegin; } while (0)
+#define IEM_MC_SAR_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) >>= (a_cShift); CHK_TYPE(int32_t, a_i32Local); CHK_CONST(uint8_t, a_cShift); (void)fMcBegin; } while (0)
+#define IEM_MC_SAR_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) >>= (a_cShift); CHK_TYPE(int64_t, a_i64Local); CHK_CONST(uint8_t, a_cShift); (void)fMcBegin; } while (0)
+#define IEM_MC_SHL_LOCAL_S16(a_i16Local, a_cShift) do { (a_i16Local) <<= (a_cShift); CHK_TYPE(int16_t, a_i16Local); CHK_CONST(uint8_t, a_cShift); (void)fMcBegin; } while (0)
+#define IEM_MC_SHL_LOCAL_S32(a_i32Local, a_cShift) do { (a_i32Local) <<= (a_cShift); CHK_TYPE(int32_t, a_i32Local); CHK_CONST(uint8_t, a_cShift); (void)fMcBegin; } while (0)
+#define IEM_MC_SHL_LOCAL_S64(a_i64Local, a_cShift) do { (a_i64Local) <<= (a_cShift); CHK_TYPE(int64_t, a_i64Local); CHK_CONST(uint8_t, a_cShift); (void)fMcBegin; } while (0)
+#define IEM_MC_AND_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) &= (a_u32Mask); CHK_TYPE(uint32_t, a_u32Local); (void)fMcBegin; } while (0)
+#define IEM_MC_OR_2LOCS_U32(a_u32Local, a_u32Mask) do { (a_u32Local) |= (a_u32Mask); CHK_TYPE(uint32_t, a_u32Local); (void)fMcBegin; } while (0)
+#define IEM_MC_SET_EFL_BIT(a_fBit) do { CHK_SINGLE_BIT(uint32_t, a_fBit); (void)fMcBegin; } while (0)
+#define IEM_MC_CLEAR_EFL_BIT(a_fBit) do { CHK_SINGLE_BIT(uint32_t, a_fBit); (void)fMcBegin; } while (0)
+#define IEM_MC_FLIP_EFL_BIT(a_fBit) do { CHK_SINGLE_BIT(uint32_t, a_fBit); (void)fMcBegin; } while (0)
+#define IEM_MC_CLEAR_FSW_EX() do { (void)fMcBegin; } while (0)
+#define IEM_MC_FPU_TO_MMX_MODE() do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_FPU_FROM_MMX_MODE() do { (void)fMcBegin; } while (0)
+
+/* MMX register stubs; each references fFpuRead or fFpuWrite so the statement
+ * only compiles where those flags are in scope. */
+#define IEM_MC_FETCH_MREG_U64(a_u64Value, a_iMReg) do { (a_u64Value) = 0; CHK_TYPE(uint64_t, a_u64Value); (void)fFpuRead; (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MREG_U32(a_u32Value, a_iMReg) do { (a_u32Value) = 0; CHK_TYPE(uint32_t, a_u32Value); (void)fFpuRead; (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_MREG_U64(a_iMReg, a_u64Value) do { CHK_TYPE(uint64_t, a_u64Value); (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_MREG_U32_ZX_U64(a_iMReg, a_u32Value) do { CHK_TYPE(uint32_t, a_u32Value); (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_REF_MREG_U64(a_pu64Dst, a_iMReg) do { (a_pu64Dst) = (uint64_t *)((uintptr_t)0); CHK_PTYPE(uint64_t *, a_pu64Dst); (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_REF_MREG_U64_CONST(a_pu64Dst, a_iMReg) do { (a_pu64Dst) = (uint64_t const *)((uintptr_t)0); CHK_PTYPE(uint64_t const *, a_pu64Dst); (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_REF_MREG_U32_CONST(a_pu32Dst, a_iMReg) do { (a_pu32Dst) = (uint32_t const *)((uintptr_t)0); CHK_PTYPE(uint32_t const *, a_pu32Dst); (void)fFpuWrite; (void)fMcBegin; } while (0)
+
+/* SSE (XMM) register stubs, gated on the fSseRead/fSseWrite flags.
+ * NOTE(review): the *_CONST reference macros touch fSseWrite rather than
+ * fSseRead — confirm that is intentional. */
+#define IEM_MC_FETCH_XREG_U128(a_u128Value, a_iXReg) do { (a_u128Value) = g_u128Zero; CHK_TYPE(RTUINT128U, a_u128Value); (void)fSseRead; (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_XREG_U64(a_u64Value, a_iXReg) do { (a_u64Value) = 0; CHK_TYPE(uint64_t, a_u64Value); (void)fSseRead; (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_XREG_U32(a_u32Value, a_iXReg) do { (a_u32Value) = 0; CHK_TYPE(uint32_t, a_u32Value); (void)fSseRead; (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_XREG_HI_U64(a_u64Value, a_iXReg) do { (a_u64Value) = 0; CHK_TYPE(uint64_t, a_u64Value); (void)fSseRead; (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_XREG_U128(a_iXReg, a_u128Value) do { CHK_TYPE(RTUINT128U, a_u128Value); (void)fSseWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_XREG_U64(a_iXReg, a_u64Value) do { CHK_TYPE(uint64_t, a_u64Value); (void)fSseWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_XREG_U64_ZX_U128(a_iXReg, a_u64Value) do { CHK_TYPE(uint64_t, a_u64Value); (void)fSseWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_XREG_U32(a_iXReg, a_u32Value) do { CHK_TYPE(uint32_t, a_u32Value); (void)fSseWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_XREG_U32_ZX_U128(a_iXReg, a_u32Value) do { CHK_TYPE(uint32_t, a_u32Value); (void)fSseWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_XREG_HI_U64(a_iXReg, a_u64Value) do { CHK_TYPE(uint64_t, a_u64Value); (void)fSseWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_REF_XREG_U128(a_pu128Dst, a_iXReg) do { (a_pu128Dst) = (PRTUINT128U)((uintptr_t)0); CHK_PTYPE(PRTUINT128U, a_pu128Dst); (void)fSseWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_REF_XREG_U128_CONST(a_pu128Dst, a_iXReg) do { (a_pu128Dst) = (PCRTUINT128U)((uintptr_t)0); CHK_PTYPE(PCRTUINT128U, a_pu128Dst); (void)fSseWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_REF_XREG_U64_CONST(a_pu64Dst, a_iXReg) do { (a_pu64Dst) = (uint64_t const *)((uintptr_t)0); CHK_PTYPE(uint64_t const *, a_pu64Dst); (void)fSseWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_COPY_XREG_U128(a_iXRegDst, a_iXRegSrc) do { (void)fSseWrite; (void)fMcBegin; } while (0)
+
+/* AVX (YMM) register stubs, gated on the fAvxRead/fAvxWrite flags.  The
+ * fetch forms assign dummy values so the destination must be assignable and
+ * of the checked type. */
+#define IEM_MC_FETCH_YREG_U256(a_u256Value, a_iYRegSrc) do { (a_u256Value).au64[0] = (a_u256Value).au64[1] = (a_u256Value).au64[2] = (a_u256Value).au64[3] = 0; CHK_TYPE(RTUINT256U, a_u256Value); (void)fAvxRead; (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_YREG_U128(a_u128Value, a_iYRegSrc) do { (a_u128Value).au64[0] = (a_u128Value).au64[1] = 0; CHK_TYPE(RTUINT128U, a_u128Value); (void)fAvxRead; (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_YREG_U64(a_u64Value, a_iYRegSrc) do { (a_u64Value) = UINT64_MAX; CHK_TYPE(uint64_t, a_u64Value); (void)fAvxRead; (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_YREG_U32(a_u32Value, a_iYRegSrc) do { (a_u32Value) = UINT32_MAX; CHK_TYPE(uint32_t, a_u32Value); (void)fAvxRead; (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_YREG_U32_ZX_VLMAX(a_iYRegDst, a_u32Value) do { CHK_TYPE(uint32_t, a_u32Value); (void)fAvxWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_YREG_U64_ZX_VLMAX(a_iYRegDst, a_u64Value) do { CHK_TYPE(uint64_t, a_u64Value); (void)fAvxWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_YREG_U128_ZX_VLMAX(a_iYRegDst, a_u128Value) do { CHK_TYPE(RTUINT128U, a_u128Value); (void)fAvxWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_YREG_U256_ZX_VLMAX(a_iYRegDst, a_u256Value) do { CHK_TYPE(RTUINT256U, a_u256Value); (void)fAvxWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_REF_YREG_U128(a_pu128Dst, a_iYReg) do { (a_pu128Dst) = (PRTUINT128U)((uintptr_t)0); CHK_PTYPE(PRTUINT128U, a_pu128Dst); (void)fAvxWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_REF_YREG_U128_CONST(a_pu128Dst, a_iYReg) do { (a_pu128Dst) = (PCRTUINT128U)((uintptr_t)0); CHK_PTYPE(PCRTUINT128U, a_pu128Dst); (void)fAvxWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_REF_YREG_U64_CONST(a_pu64Dst, a_iYReg) do { (a_pu64Dst) = (uint64_t const *)((uintptr_t)0); CHK_PTYPE(uint64_t const *, a_pu64Dst); (void)fAvxWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_CLEAR_YREG_128_UP(a_iYReg) do { (void)fAvxWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_COPY_YREG_U256_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) do { (void)fAvxWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_COPY_YREG_U128_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) do { (void)fAvxWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_COPY_YREG_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc) do { (void)fAvxWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_MERGE_YREG_U32_U96_ZX_VLMAX(a_iYRegDst, a_iYRegSrc32, a_iYRegSrcHx) do { (void)fAvxWrite; (void)fAvxRead; (void)fMcBegin; } while (0)
+#define IEM_MC_MERGE_YREG_U64_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) do { (void)fAvxWrite; (void)fAvxRead; (void)fMcBegin; } while (0)
+#define IEM_MC_MERGE_YREG_U64HI_U64_ZX_VLMAX(a_iYRegDst, a_iYRegSrc64, a_iYRegSrcHx) do { (void)fAvxWrite; (void)fAvxRead; (void)fMcBegin; } while (0)
+#define IEM_MC_MERGE_YREG_U64LOCAL_U64_ZX_VLMAX(a_iYRegDst, a_u64Local, a_iYRegSrcHx) do { (void)fAvxWrite; (void)fAvxRead; (void)fMcBegin; } while (0)
+
+/* Memory fetch stubs.  CHK_GCPTR enforces the guest-address type of the
+ * effective address; the MEM16/MEM32 variants instead take fixed-width
+ * 16/32-bit addresses.  The _DISP forms additionally require a constant
+ * 8-bit displacement. */
+#define IEM_MC_FETCH_MEM_U8(a_u8Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM16_U8(a_u8Dst, a_iSeg, a_GCPtrMem16) do { CHK_TYPE(uint16_t, a_GCPtrMem16); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM32_U8(a_u8Dst, a_iSeg, a_GCPtrMem32) do { CHK_TYPE(uint32_t, a_GCPtrMem32); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_U16(a_u16Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_I16(a_i16Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(int16_t, a_i16Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_U32(a_u32Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_I32(a_i32Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(int32_t, a_i32Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_S32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_U64_ALIGN_U128(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_I64(a_i64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(int64_t, a_i64Dst); (void)fMcBegin; } while (0)
+
+#define IEM_MC_FETCH_MEM_U8_DISP(a_u8Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
+ do { CHK_GCPTR(a_GCPtrMem); CHK_CONST(uint8_t, a_offDisp); CHK_TYPE(uint8_t, a_u8Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_U16_DISP(a_u16Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
+ do { CHK_GCPTR(a_GCPtrMem); CHK_CONST(uint8_t, a_offDisp); CHK_TYPE(uint16_t, a_u16Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_U32_DISP(a_u32Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
+ do { CHK_GCPTR(a_GCPtrMem); CHK_CONST(uint8_t, a_offDisp); CHK_TYPE(uint32_t, a_u32Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_U64_DISP(a_u64Dst, a_iSeg, a_GCPtrMem, a_offDisp) \
+ do { CHK_GCPTR(a_GCPtrMem); CHK_CONST(uint8_t, a_offDisp); CHK_TYPE(uint64_t, a_u64Dst); (void)fMcBegin; } while (0)
+
+#define IEM_MC_FETCH_MEM_U8_ZX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_U8_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_U8_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_U16_ZX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_U16_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_U32_ZX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_U8_SX_U16(a_u16Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_U8_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_U8_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_U16_SX_U32(a_u32Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_U16_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_U32_SX_U64(a_u64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_R32(a_r32Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(RTFLOAT32U, a_r32Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_R64(a_r64Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(RTFLOAT64U, a_r64Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_R80(a_r80Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(RTFLOAT80U, a_r80Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_U128(a_u128Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(RTUINT128U, a_u128Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_U128_ALIGN_SSE(a_u128Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(RTUINT128U, a_u128Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_U256(a_u256Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(RTUINT256U, a_u256Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_FETCH_MEM_U256_ALIGN_AVX(a_u256Dst, a_iSeg, a_GCPtrMem) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(RTUINT256U, a_u256Dst); (void)fMcBegin; } while (0)
+
+/* Memory store stubs.  NOTE(review): only the U8, U128 and U256 variants
+ * apply CHK_SEG_IDX to a_iSeg — confirm the U16/U32/U64 omissions are
+ * intentional. */
+#define IEM_MC_STORE_MEM_U8(a_iSeg, a_GCPtrMem, a_u8Value) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(uint8_t, a_u8Value); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_MEM_U16(a_iSeg, a_GCPtrMem, a_u16Value) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(uint16_t, a_u16Value); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_MEM_U32(a_iSeg, a_GCPtrMem, a_u32Value) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(uint32_t, a_u32Value); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_MEM_U64(a_iSeg, a_GCPtrMem, a_u64Value) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(uint64_t, a_u64Value); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_MEM_U8_CONST(a_iSeg, a_GCPtrMem, a_u8C) do { CHK_GCPTR(a_GCPtrMem); CHK_CONST(uint8_t, a_u8C); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_MEM_U16_CONST(a_iSeg, a_GCPtrMem, a_u16C) do { CHK_GCPTR(a_GCPtrMem); CHK_CONST(uint16_t, a_u16C); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_MEM_U32_CONST(a_iSeg, a_GCPtrMem, a_u32C) do { CHK_GCPTR(a_GCPtrMem); CHK_CONST(uint32_t, a_u32C); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_MEM_U64_CONST(a_iSeg, a_GCPtrMem, a_u64C) do { CHK_GCPTR(a_GCPtrMem); CHK_CONST(uint64_t, a_u64C); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_MEM_I8_CONST_BY_REF( a_pi8Dst, a_i8C) do { CHK_TYPE(int8_t *, a_pi8Dst); CHK_CONST(int8_t, a_i8C); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_MEM_I16_CONST_BY_REF(a_pi16Dst, a_i16C) do { CHK_TYPE(int16_t *, a_pi16Dst); CHK_CONST(int16_t, a_i16C); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_MEM_I32_CONST_BY_REF(a_pi32Dst, a_i32C) do { CHK_TYPE(int32_t *, a_pi32Dst); CHK_CONST(int32_t, a_i32C); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_MEM_I64_CONST_BY_REF(a_pi64Dst, a_i64C) do { CHK_TYPE(int64_t *, a_pi64Dst); CHK_CONST(int64_t, a_i64C); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_MEM_NEG_QNAN_R32_BY_REF(a_pr32Dst) do { CHK_TYPE(PRTFLOAT32U, a_pr32Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_MEM_NEG_QNAN_R64_BY_REF(a_pr64Dst) do { CHK_TYPE(PRTFLOAT64U, a_pr64Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_MEM_NEG_QNAN_R80_BY_REF(a_pr80Dst) do { CHK_TYPE(PRTFLOAT80U, a_pr80Dst); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_MEM_U128(a_iSeg, a_GCPtrMem, a_u128Src) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(RTUINT128U, a_u128Src); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_MEM_U128_ALIGN_SSE(a_iSeg, a_GCPtrMem, a_u128Src) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(RTUINT128U, a_u128Src); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_MEM_U256(a_iSeg, a_GCPtrMem, a_u256Src) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(RTUINT256U, a_u256Src); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_MEM_U256_ALIGN_AVX(a_iSeg, a_GCPtrMem, a_u256Src) do { CHK_GCPTR(a_GCPtrMem); CHK_TYPE(RTUINT256U, a_u256Src); CHK_SEG_IDX(a_iSeg); (void)fMcBegin; } while (0)
+
+/* Stack, memory-mapping, effective-address and worker-call stubs.  The
+ * CALL_* forms run CHK_CALL_ARG (defined earlier in the file) on each
+ * argument against its declared index; the AIMPL forms with a return value
+ * also assign VINF_SUCCESS so a_rc must be assignable. */
+#define IEM_MC_PUSH_U16(a_u16Value) do { (void)fMcBegin; } while (0)
+#define IEM_MC_PUSH_U32(a_u32Value) do { (void)fMcBegin; } while (0)
+#define IEM_MC_PUSH_U32_SREG(a_u32Value) do { (void)fMcBegin; } while (0)
+#define IEM_MC_PUSH_U64(a_u64Value) do { (void)fMcBegin; } while (0)
+#define IEM_MC_POP_U16(a_pu16Value) do { (void)fMcBegin; } while (0)
+#define IEM_MC_POP_U32(a_pu32Value) do { (void)fMcBegin; } while (0)
+#define IEM_MC_POP_U64(a_pu64Value) do { (void)fMcBegin; } while (0)
+#define IEM_MC_MEM_MAP(a_pMem, a_fAccess, a_iSeg, a_GCPtrMem, a_iArg) do { (void)fMcBegin; } while (0)
+#define IEM_MC_MEM_MAP_EX(a_pvMem, a_fAccess, a_cbMem, a_iSeg, a_GCPtrMem, a_iArg) do { (void)fMcBegin; } while (0)
+#define IEM_MC_MEM_COMMIT_AND_UNMAP(a_pvMem, a_fAccess) do { (void)fMcBegin; } while (0)
+#define IEM_MC_MEM_COMMIT_AND_UNMAP_FOR_FPU_STORE(a_pvMem, a_fAccess, a_u16FSW) do { (void)fMcBegin; } while (0)
+#define IEM_MC_CALC_RM_EFF_ADDR(a_GCPtrEff, bRm, cbImm) do { (a_GCPtrEff) = 0; CHK_GCPTR(a_GCPtrEff); (void)fMcBegin; } while (0)
+#define IEM_MC_CALL_VOID_AIMPL_0(a_pfn) do { (void)fMcBegin; } while (0)
+#define IEM_MC_CALL_VOID_AIMPL_1(a_pfn, a0) \
+ do { CHK_CALL_ARG(a0, 0); (void)fMcBegin; } while (0)
+#define IEM_MC_CALL_VOID_AIMPL_2(a_pfn, a0, a1) \
+ do { CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); (void)fMcBegin; } while (0)
+#define IEM_MC_CALL_VOID_AIMPL_3(a_pfn, a0, a1, a2) \
+ do { CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); CHK_CALL_ARG(a2, 2); (void)fMcBegin; } while (0)
+#define IEM_MC_CALL_VOID_AIMPL_4(a_pfn, a0, a1, a2, a3) \
+ do { CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); CHK_CALL_ARG(a2, 2); CHK_CALL_ARG(a3, 3); (void)fMcBegin; } while (0)
+#define IEM_MC_CALL_AIMPL_3(a_rc, a_pfn, a0, a1, a2) \
+ do { CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); CHK_CALL_ARG(a2, 2); (a_rc) = VINF_SUCCESS; (void)fMcBegin; } while (0)
+#define IEM_MC_CALL_AIMPL_4(a_rc, a_pfn, a0, a1, a2, a3) \
+ do { CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); CHK_CALL_ARG(a2, 2); CHK_CALL_ARG(a3, 3); (a_rc) = VINF_SUCCESS; (void)fMcBegin; } while (0)
+#define IEM_MC_CALL_CIMPL_0(a_pfnCImpl) do { (void)fMcBegin; } while (0)
+#define IEM_MC_CALL_CIMPL_1(a_pfnCImpl, a0) \
+ do { CHK_CALL_ARG(a0, 0); (void)fMcBegin; } while (0)
+#define IEM_MC_CALL_CIMPL_2(a_pfnCImpl, a0, a1) \
+ do { CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); (void)fMcBegin; } while (0)
+#define IEM_MC_CALL_CIMPL_3(a_pfnCImpl, a0, a1, a2) \
+ do { CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); CHK_CALL_ARG(a2, 2); (void)fMcBegin; } while (0)
+#define IEM_MC_CALL_CIMPL_4(a_pfnCImpl, a0, a1, a2, a3) \
+ do { CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); CHK_CALL_ARG(a2, 2); CHK_CALL_ARG(a3, 3); (void)fMcBegin; } while (0)
+#define IEM_MC_CALL_CIMPL_5(a_pfnCImpl, a0, a1, a2, a3, a4) \
+ do { CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); CHK_CALL_ARG(a2, 2); CHK_CALL_ARG(a3, 3); CHK_CALL_ARG(a4, 4); (void)fMcBegin; } while (0)
+#define IEM_MC_DEFER_TO_CIMPL_0(a_pfnCImpl) (VINF_SUCCESS)
+#define IEM_MC_DEFER_TO_CIMPL_1(a_pfnCImpl, a0) (VINF_SUCCESS)
+#define IEM_MC_DEFER_TO_CIMPL_2(a_pfnCImpl, a0, a1) (VINF_SUCCESS)
+#define IEM_MC_DEFER_TO_CIMPL_3(a_pfnCImpl, a0, a1, a2) (VINF_SUCCESS)
+
+#define IEM_MC_CALL_FPU_AIMPL_1(a_pfnAImpl, a0) \
+ do { (void)fFpuHost; (void)fFpuWrite; CHK_CALL_ARG(a0, 0); (void)fMcBegin; } while (0)
+#define IEM_MC_CALL_FPU_AIMPL_2(a_pfnAImpl, a0, a1) \
+ do { (void)fFpuHost; (void)fFpuWrite; CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); (void)fMcBegin; } while (0)
+#define IEM_MC_CALL_FPU_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
+ do { (void)fFpuHost; (void)fFpuWrite; CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); CHK_CALL_ARG(a2, 2); (void)fMcBegin; } while (0)
+#define IEM_MC_SET_FPU_RESULT(a_FpuData, a_FSW, a_pr80Value) do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_PUSH_FPU_RESULT(a_FpuData) do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_PUSH_FPU_RESULT_MEM_OP(a_FpuData, a_iEffSeg, a_GCPtrEff) do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_PUSH_FPU_RESULT_TWO(a_FpuDataTwo) do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_FPU_RESULT(a_FpuData, a_iStReg) do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_FPU_RESULT_THEN_POP(a_FpuData, a_iStReg) do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_FPU_RESULT_MEM_OP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_STORE_FPU_RESULT_MEM_OP_THEN_POP(a_FpuData, a_iStReg, a_iEffSeg, a_GCPtrEff) do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_FPU_STACK_UNDERFLOW(a_iStReg) do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP(a_iStReg, a_iEffSeg, a_GCPtrEff) do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP(a_iStReg) do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_FPU_STACK_UNDERFLOW_MEM_OP_THEN_POP(a_iStReg, a_iEffSeg, a_GCPtrEff) do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_FPU_STACK_UNDERFLOW_THEN_POP_POP() do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW() do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_FPU_STACK_PUSH_UNDERFLOW_TWO() do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_FPU_STACK_PUSH_OVERFLOW() do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_FPU_STACK_PUSH_OVERFLOW_MEM_OP(a_iEffSeg, a_GCPtrEff) do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_UPDATE_FPU_OPCODE_IP() do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_FPU_STACK_DEC_TOP() do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_FPU_STACK_INC_TOP() do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_FPU_STACK_FREE(a_iStReg) do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_UPDATE_FSW(a_u16FSW) do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_UPDATE_FSW_CONST(a_u16FSW) do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_UPDATE_FSW_WITH_MEM_OP(a_u16FSW, a_iEffSeg, a_GCPtrEff) do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_UPDATE_FSW_THEN_POP(a_u16FSW) do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_UPDATE_FSW_WITH_MEM_OP_THEN_POP(a_u16FSW, a_iEffSeg, a_GCPtrEff) do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_UPDATE_FSW_THEN_POP_POP(a_u16FSW) do { (void)fFpuWrite; (void)fMcBegin; } while (0)
+#define IEM_MC_PREPARE_FPU_USAGE() (void)fMcBegin; \
+ const int fFpuRead = 1, fFpuWrite = 1, fFpuHost = 1, fSseRead = 1, fSseWrite = 1, fSseHost = 1, fAvxRead = 1, fAvxWrite = 1, fAvxHost = 1
+#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_READ() (void)fMcBegin; const int fFpuRead = 1, fSseRead = 1
+#define IEM_MC_ACTUALIZE_FPU_STATE_FOR_CHANGE() (void)fMcBegin; const int fFpuRead = 1, fFpuWrite = 1, fSseRead = 1, fSseWrite = 1
+#define IEM_MC_PREPARE_SSE_USAGE() (void)fMcBegin; const int fSseRead = 1, fSseWrite = 1, fSseHost = 1
+#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_READ() (void)fMcBegin; const int fSseRead = 1
+#define IEM_MC_ACTUALIZE_SSE_STATE_FOR_CHANGE() (void)fMcBegin; const int fSseRead = 1, fSseWrite = 1
+#define IEM_MC_PREPARE_AVX_USAGE() (void)fMcBegin; const int fAvxRead = 1, fAvxWrite = 1, fAvxHost = 1, fSseRead = 1, fSseWrite = 1, fSseHost = 1
+#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_READ() (void)fMcBegin; const int fAvxRead = 1, fSseRead = 1
+#define IEM_MC_ACTUALIZE_AVX_STATE_FOR_CHANGE() (void)fMcBegin; const int fAvxRead = 1, fAvxWrite = 1, fSseRead = 1, fSseWrite = 1
+
+#define IEM_MC_CALL_MMX_AIMPL_2(a_pfnAImpl, a0, a1) \
+ do { (void)fFpuHost; (void)fFpuWrite; CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); (void)fMcBegin; } while (0)
+#define IEM_MC_CALL_MMX_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
+ do { (void)fFpuHost; (void)fFpuWrite; CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); CHK_CALL_ARG(a2, 2); (void)fMcBegin; } while (0)
+#define IEM_MC_CALL_SSE_AIMPL_2(a_pfnAImpl, a0, a1) \
+ do { (void)fSseHost; (void)fSseWrite; CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); (void)fMcBegin; } while (0)
+#define IEM_MC_CALL_SSE_AIMPL_3(a_pfnAImpl, a0, a1, a2) \
+ do { (void)fSseHost; (void)fSseWrite; CHK_CALL_ARG(a0, 0); CHK_CALL_ARG(a1, 1); CHK_CALL_ARG(a2, 2); (void)fMcBegin; } while (0)
+#define IEM_MC_IMPLICIT_AVX_AIMPL_ARGS() do { IEM_MC_ARG_CONST(PX86XSAVEAREA, pXState, pVCpu->cpum.GstCtx.CTX_SUFF(pXState), 0); (void)fMcBegin; } while (0)
+#define IEM_MC_CALL_AVX_AIMPL_2(a_pfnAImpl, a1, a2) \
+ do { (void)fAvxHost; (void)fAvxWrite; CHK_CALL_ARG(a1, 1); CHK_CALL_ARG(a2, 2); (void)fMcBegin; } while (0)
+#define IEM_MC_CALL_AVX_AIMPL_3(a_pfnAImpl, a1, a2, a3) \
+ do { (void)fAvxHost; (void)fAvxWrite; CHK_CALL_ARG(a1, 1); CHK_CALL_ARG(a2, 2); CHK_CALL_ARG(a3, 3); (void)fMcBegin; } while (0)
+#define IEM_MC_CALL_AVX_AIMPL_4(a_pfnAImpl, a1, a2, a3, a4) \
+ do { (void)fAvxHost; (void)fAvxWrite; CHK_CALL_ARG(a1, 1); CHK_CALL_ARG(a2, 2); CHK_CALL_ARG(a3, 3); CHK_CALL_ARG(a4, 4); (void)fMcBegin; } while (0)
+
+#define IEM_MC_IF_EFL_BIT_SET(a_fBit) (void)fMcBegin; if (g_fRandom) {
+#define IEM_MC_IF_EFL_BIT_NOT_SET(a_fBit) (void)fMcBegin; if (g_fRandom) {
+#define IEM_MC_IF_EFL_ANY_BITS_SET(a_fBits) (void)fMcBegin; if (g_fRandom) {
+#define IEM_MC_IF_EFL_NO_BITS_SET(a_fBits) (void)fMcBegin; if (g_fRandom) {
+#define IEM_MC_IF_EFL_BITS_NE(a_fBit1, a_fBit2) (void)fMcBegin; if (g_fRandom) {
+#define IEM_MC_IF_EFL_BITS_EQ(a_fBit1, a_fBit2) (void)fMcBegin; if (g_fRandom) {
+#define IEM_MC_IF_EFL_BIT_SET_OR_BITS_NE(a_fBit, a_fBit1, a_fBit2) (void)fMcBegin; if (g_fRandom) {
+#define IEM_MC_IF_EFL_BIT_NOT_SET_AND_BITS_EQ(a_fBit, a_fBit1, a_fBit2) (void)fMcBegin; if (g_fRandom) {
+#define IEM_MC_IF_CX_IS_NZ() (void)fMcBegin; if (g_fRandom) {
+#define IEM_MC_IF_ECX_IS_NZ() (void)fMcBegin; if (g_fRandom) {
+#define IEM_MC_IF_RCX_IS_NZ() (void)fMcBegin; if (g_fRandom) {
+#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_SET(a_fBit) (void)fMcBegin; if (g_fRandom) {
+#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_SET(a_fBit) (void)fMcBegin; if (g_fRandom) {
+#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_SET(a_fBit) (void)fMcBegin; if (g_fRandom) {
+#define IEM_MC_IF_CX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) (void)fMcBegin; if (g_fRandom) {
+#define IEM_MC_IF_ECX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) (void)fMcBegin; if (g_fRandom) {
+#define IEM_MC_IF_RCX_IS_NZ_AND_EFL_BIT_NOT_SET(a_fBit) (void)fMcBegin; if (g_fRandom) {
+#define IEM_MC_IF_LOCAL_IS_Z(a_Local) (void)fMcBegin; if ((a_Local) == 0) {
+#define IEM_MC_IF_GREG_BIT_SET(a_iGReg, a_iBitNo) (void)fMcBegin; if (g_fRandom) {
+#define IEM_MC_IF_FPUREG_NOT_EMPTY(a_iSt) (void)fMcBegin; if (g_fRandom != fFpuRead) {
+#define IEM_MC_IF_FPUREG_IS_EMPTY(a_iSt) (void)fMcBegin; if (g_fRandom != fFpuRead) {
+#define IEM_MC_IF_FPUREG_NOT_EMPTY_REF_R80(a_pr80Dst, a_iSt) (void)fMcBegin; \
+ a_pr80Dst = NULL; \
+ if (g_fRandom != fFpuRead) {
+#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80(p0, i0, p1, i1) (void)fMcBegin; \
+ p0 = NULL; \
+ p1 = NULL; \
+ if (g_fRandom != fFpuRead) {
+#define IEM_MC_IF_TWO_FPUREGS_NOT_EMPTY_REF_R80_FIRST(p0, i0, i1) (void)fMcBegin; \
+ p0 = NULL; \
+ if (g_fRandom != fFpuRead) {
+#define IEM_MC_IF_FCW_IM() (void)fMcBegin; if (g_fRandom != fFpuRead) {
+#define IEM_MC_ELSE() } else {
+#define IEM_MC_ENDIF() } do { (void)fMcBegin; } while (0)
+
+/** @} */
+
+#include "../VMMAll/IEMAllInstructions.cpp.h"
+
+
+
+/**
+ * Formalities...
+ */
+int main()
+{
+ RTTEST hTest;
+ RTEXITCODE rcExit = RTTestInitAndCreate("tstIEMCheckMc", &hTest);
+ if (rcExit == RTEXITCODE_SUCCESS)
+ {
+ RTTestBanner(hTest);
+ RTTestPrintf(hTest, RTTESTLVL_ALWAYS, "(this is only a compile test.)");
+ rcExit = RTTestSummaryAndDestroy(hTest);
+ }
+ return rcExit;
+}
diff --git a/src/VBox/VMM/testcase/tstMMHyperHeap.cpp b/src/VBox/VMM/testcase/tstMMHyperHeap.cpp
new file mode 100644
index 00000000..46f818e2
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstMMHyperHeap.cpp
@@ -0,0 +1,284 @@
+/* $Id: tstMMHyperHeap.cpp $ */
+/** @file
+ * MM Hypervisor Heap testcase.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/stam.h>
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/vmm/gvm.h>
+#include <VBox/sup.h>
+#include <VBox/param.h>
+#include <iprt/errcore.h>
+
+#include <VBox/log.h>
+#include <iprt/initterm.h>
+#include <iprt/mem.h>
+#include <iprt/assert.h>
+#include <iprt/stream.h>
+#include <iprt/string.h>
+
+#define NUM_CPUS 16
+
+#define OUTPUT(a) do { Log(a); RTPrintf a; } while (0)
+
+/**
+ * Entry point.
+ */
+extern "C" DECLEXPORT(int) TrustedMain(int argc, char **argv, char **envp)
+{
+ RT_NOREF1(envp);
+
+ /*
+ * Init runtime.
+ */
+ int rc = RTR3InitExe(argc, &argv, 0);
+ AssertRCReturn(rc, RTEXITCODE_INIT);
+
+ /*
+ * Create empty VM structure and call MMR3Init().
+ */
+ void *pvVM = NULL;
+ RTR0PTR pvR0 = NIL_RTR0PTR;
+ SUPPAGE aPages[(sizeof(GVM) + NUM_CPUS * sizeof(GVMCPU)) >> PAGE_SHIFT];
+ rc = SUPR3Init(NULL);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Fatal error: SUP failure! rc=%Rrc\n", rc);
+ return RTEXITCODE_FAILURE;
+ }
+ rc = SUPR3PageAllocEx(RT_ELEMENTS(aPages), 0, &pvVM, &pvR0, &aPages[0]);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Fatal error: Allocation failure! rc=%Rrc\n", rc);
+ return RTEXITCODE_FAILURE;
+ }
+ RT_BZERO(pvVM, RT_ELEMENTS(aPages) * PAGE_SIZE); /* SUPR3PageAllocEx doesn't necessarily zero the memory. */
+ PVM pVM = (PVM)pvVM;
+ pVM->paVMPagesR3 = aPages;
+ pVM->pVMR0ForCall = pvR0;
+
+ PUVM pUVM = (PUVM)RTMemPageAllocZ(RT_ALIGN_Z(sizeof(*pUVM), PAGE_SIZE));
+ if (!pUVM)
+ {
+ RTPrintf("Fatal error: RTMEmPageAllocZ failed\n");
+ return RTEXITCODE_FAILURE;
+ }
+ pUVM->u32Magic = UVM_MAGIC;
+ pUVM->pVM = pVM;
+ pVM->pUVM = pUVM;
+
+ pVM->cCpus = NUM_CPUS;
+ pVM->cbSelf = sizeof(VM);
+ pVM->cbVCpu = sizeof(VMCPU);
+ PVMCPU pVCpu = (PVMCPU)((uintptr_t)pVM + sizeof(GVM));
+ for (VMCPUID idCpu = 0; idCpu < NUM_CPUS; idCpu++)
+ {
+ pVM->apCpusR3[idCpu] = pVCpu;
+ pVCpu = (PVMCPU)((uintptr_t)pVCpu + sizeof(GVMCPU));
+ }
+
+ rc = STAMR3InitUVM(pUVM);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("FAILURE: STAMR3Init failed. rc=%Rrc\n", rc);
+ return 1;
+ }
+
+ rc = MMR3InitUVM(pUVM);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("FAILURE: STAMR3Init failed. rc=%Rrc\n", rc);
+ return 1;
+ }
+
+ rc = CFGMR3Init(pVM, NULL, NULL);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("FAILURE: CFGMR3Init failed. rc=%Rrc\n", rc);
+ return 1;
+ }
+
+ rc = MMR3Init(pVM);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Fatal error: MMR3Init failed! rc=%Rrc\n", rc);
+ return 1;
+ }
+
+ /*
+ * Try allocate.
+ */
+ static struct
+ {
+ size_t cb;
+ unsigned uAlignment;
+ void *pvAlloc;
+ unsigned iFreeOrder;
+ } aOps[] =
+ {
+ { 16, 0, NULL, 0 },
+ { 16, 4, NULL, 1 },
+ { 16, 8, NULL, 2 },
+ { 16, 16, NULL, 5 },
+ { 16, 32, NULL, 4 },
+ { 32, 0, NULL, 3 },
+ { 31, 0, NULL, 6 },
+ { 1024, 0, NULL, 8 },
+ { 1024, 32, NULL, 10 },
+ { 1024, 32, NULL, 12 },
+ { PAGE_SIZE, PAGE_SIZE, NULL, 13 },
+ { 1024, 32, NULL, 9 },
+ { PAGE_SIZE, 32, NULL, 11 },
+ { PAGE_SIZE, PAGE_SIZE, NULL, 14 },
+ { 16, 0, NULL, 15 },
+ { 9, 0, NULL, 7 },
+ { 16, 0, NULL, 7 },
+ { 36, 0, NULL, 7 },
+ { 16, 0, NULL, 7 },
+ { 12344, 0, NULL, 7 },
+ { 50, 0, NULL, 7 },
+ { 16, 0, NULL, 7 },
+ };
+ unsigned i;
+#ifdef DEBUG
+ MMHyperHeapDump(pVM);
+#endif
+ size_t cbBefore = MMHyperHeapGetFreeSize(pVM);
+ static char szFill[] = "01234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
+
+ /* allocate */
+ for (i = 0; i < RT_ELEMENTS(aOps); i++)
+ {
+ rc = MMHyperAlloc(pVM, aOps[i].cb, aOps[i].uAlignment, MM_TAG_VM, &aOps[i].pvAlloc);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Failure: MMHyperAlloc(, %#x, %#x,) -> %d i=%d\n", aOps[i].cb, aOps[i].uAlignment, rc, i);
+ return 1;
+ }
+ memset(aOps[i].pvAlloc, szFill[i], aOps[i].cb);
+ if (RT_ALIGN_P(aOps[i].pvAlloc, (aOps[i].uAlignment ? aOps[i].uAlignment : 8)) != aOps[i].pvAlloc)
+ {
+ RTPrintf("Failure: MMHyperAlloc(, %#x, %#x,) -> %p, invalid alignment!\n", aOps[i].cb, aOps[i].uAlignment, aOps[i].pvAlloc);
+ return 1;
+ }
+ }
+
+ /* free and allocate the same node again. */
+#ifdef DEBUG
+ MMHyperHeapDump(pVM);
+#endif
+ for (i = 0; i < RT_ELEMENTS(aOps); i++)
+ {
+ if ( !aOps[i].pvAlloc
+ || aOps[i].uAlignment == PAGE_SIZE)
+ continue;
+ size_t cbBeforeSub = MMHyperHeapGetFreeSize(pVM);
+ rc = MMHyperFree(pVM, aOps[i].pvAlloc);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Failure: MMHyperFree(, %p,) -> %d i=%d\n", aOps[i].pvAlloc, rc, i);
+ return 1;
+ }
+ size_t const cbFreed = MMHyperHeapGetFreeSize(pVM);
+ void *pv;
+ rc = MMHyperAlloc(pVM, aOps[i].cb, aOps[i].uAlignment, MM_TAG_VM_REQ, &pv);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Failure: MMHyperAlloc(, %#x, %#x,) -> %d i=%d\n", aOps[i].cb, aOps[i].uAlignment, rc, i);
+ return 1;
+ }
+ if (pv != aOps[i].pvAlloc)
+ {
+ RTPrintf("Failure: Free+Alloc returned different address. new=%p old=%p i=%d (doesn't work with delayed free)\n", pv, aOps[i].pvAlloc, i);
+ //return 1;
+ }
+ aOps[i].pvAlloc = pv;
+ OUTPUT(("debug: i=%02d cbBeforeSub=%d cbFreed=%d now=%d\n", i, cbBeforeSub, cbFreed, MMHyperHeapGetFreeSize(pVM)));
+#if 0 /* won't work :/ */
+ size_t cbAfterSub = MMHyperHeapGetFreeSize(pVM);
+ if (cbBeforeSub != cbAfterSub)
+ {
+ RTPrintf("Failure: cbBeforeSub=%d cbAfterSub=%d. i=%d\n", cbBeforeSub, cbAfterSub, i);
+ return 1;
+ }
+#endif
+ }
+
+ /* free it in a specific order. */
+ int cFreed = 0;
+ for (i = 0; i < RT_ELEMENTS(aOps); i++)
+ {
+ unsigned j;
+ for (j = 0; j < RT_ELEMENTS(aOps); j++)
+ {
+ if ( aOps[j].iFreeOrder != i
+ || !aOps[j].pvAlloc)
+ continue;
+ OUTPUT(("j=%02d i=%02d free=%d cb=%5u pv=%p\n", j, i, MMHyperHeapGetFreeSize(pVM), aOps[j].cb, aOps[j].pvAlloc));
+ if (aOps[j].uAlignment == PAGE_SIZE)
+ cbBefore -= aOps[j].cb;
+ else
+ {
+ rc = MMHyperFree(pVM, aOps[j].pvAlloc);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Failure: MMHyperFree(, %p,) -> %d j=%d i=%d\n", aOps[j].pvAlloc, rc, i, j);
+ return 1;
+ }
+ }
+ aOps[j].pvAlloc = NULL;
+ cFreed++;
+ }
+ }
+ Assert(cFreed == RT_ELEMENTS(aOps));
+ OUTPUT(("i=done free=%d\n", MMHyperHeapGetFreeSize(pVM)));
+
+ /* check that we're back at the right amount of free memory. */
+ size_t cbAfter = MMHyperHeapGetFreeSize(pVM);
+ if (cbBefore != cbAfter)
+ {
+ OUTPUT(("Warning: Either we've split out an alignment chunk at the start, or we've got\n"
+ " an alloc/free accounting bug: cbBefore=%d cbAfter=%d\n", cbBefore, cbAfter));
+#ifdef DEBUG
+ MMHyperHeapDump(pVM);
+#endif
+ }
+
+ RTPrintf("tstMMHyperHeap: Success\n");
+#ifdef LOG_ENABLED
+ RTLogFlush(NULL);
+#endif
+ SUPR3PageFreeEx(pVM, RT_ELEMENTS(aPages));
+ RTMemPageFree(pUVM, RT_ALIGN_Z(sizeof(*pUVM), PAGE_SIZE));
+ return 0;
+}
+
+
+#if !defined(VBOX_WITH_HARDENING) || !defined(RT_OS_WINDOWS)
+/**
+ * Main entry point.
+ */
+int main(int argc, char **argv, char **envp)
+{
+ return TrustedMain(argc, argv, envp);
+}
+#endif
+
diff --git a/src/VBox/VMM/testcase/tstMicro.h b/src/VBox/VMM/testcase/tstMicro.h
new file mode 100644
index 00000000..e8bd8a0a
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstMicro.h
@@ -0,0 +1,146 @@
+/* $Id: tstMicro.h $ */
+/** @file
+ * Micro Testcase, profiling special CPU operations.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+#ifndef VMM_INCLUDED_SRC_testcase_tstMicro_h
+#define VMM_INCLUDED_SRC_testcase_tstMicro_h
+#ifndef RT_WITHOUT_PRAGMA_ONCE
+# pragma once
+#endif
+
+/**
+ * The testcase identifier.
+ */
+typedef enum TSTMICROTEST
+{
+ TSTMICROTEST_OVERHEAD = 0,
+ TSTMICROTEST_INVLPG_0,
+ TSTMICROTEST_INVLPG_EIP,
+ TSTMICROTEST_INVLPG_ESP,
+ TSTMICROTEST_CR3_RELOAD,
+ TSTMICROTEST_WP_DISABLE,
+ TSTMICROTEST_WP_ENABLE,
+
+ TSTMICROTEST_TRAP_FIRST,
+ TSTMICROTEST_PF_R0 = TSTMICROTEST_TRAP_FIRST,
+ TSTMICROTEST_PF_R1,
+ TSTMICROTEST_PF_R2,
+ TSTMICROTEST_PF_R3,
+
+ /** The max testcase. */
+ TSTMICROTEST_MAX
+} TSTMICROTEST;
+
+
+/**
+ *
+ */
+typedef struct TSTMICRORESULT
+{
+ /** The total number of ticks spent executing the testcase.
+ * This may include extra overhead stuff if we're weird stuff during trap handler. */
+ uint64_t cTotalTicks;
+ /** Number of ticks spent getting into Rx from R0.
+ * This will include time spent setting up the testcase in R3. */
+ uint64_t cToRxFirstTicks;
+ /** Number of ticks spent executing the trap.
+ * I.e. from right before trapping instruction to the start of the trap handler.
+ * This does not apply to testcases which doesn't trap. */
+ uint64_t cTrapTicks;
+ /** Number of ticks spent resuming Rx executing after a trap.
+ * This does not apply to testcases which doesn't trap. */
+ uint64_t cToRxTrapTicks;
+ /** Number of ticks to get to back to r0 after resuming the trapped code.
+ * This does not apply to testcases which doesn't trap. */
+ uint64_t cToR0Ticks;
+} TSTMICRORESULT, *PTSTMICRORESULT;
+
+/**
+ * Micro profiling testcase
+ */
+typedef struct TSTMICRO
+{
+ /** The RC address of this structure. */
+ RTRCPTR RCPtr;
+ /** Just for proper alignment. */
+ RTRCPTR RCPtrStack;
+
+ /** TSC sampled right before leaving R0. */
+ uint64_t u64TSCR0Start;
+ /** TSC sampled right before the exception. */
+ uint64_t u64TSCRxStart;
+ /** TSC sampled right after entering the trap handler. */
+ uint64_t u64TSCR0Enter;
+ /** TSC sampled right before exitting the trap handler. */
+ uint64_t u64TSCR0Exit;
+ /** TSC sampled right after resuming guest trap. */
+ uint64_t u64TSCRxEnd;
+ /** TSC sampled right after re-entering R0. */
+ uint64_t u64TSCR0End;
+ /** Number of times entered (should be one). */
+ uint32_t cHits;
+ /** Advance EIP. */
+ int32_t offEIPAdd;
+ /** The last CR3 code. */
+ uint32_t u32CR2;
+ /** The last error code. */
+ uint32_t u32ErrCd;
+ /** The last trap eip. */
+ uint32_t u32EIP;
+ /** The original IDT address and limit. */
+ VBOXIDTR OriginalIDTR;
+ /** Our IDT. */
+ VBOXIDTE aIDT[256];
+
+ /** The overhead for the rdtsc + 2 xchg instr. */
+ uint64_t u64Overhead;
+
+ /** The testresults. */
+ TSTMICRORESULT aResults[TSTMICROTEST_MAX];
+ /** Ring-3 stack. */
+ uint8_t au8Stack[4096];
+
+} TSTMICRO, *PTSTMICRO;
+
+
+RT_C_DECLS_BEGIN
+
+DECLASM(void) idtOnly42(PTSTMICRO pTst);
+
+
+DECLASM(void) tstOverhead(PTSTMICRO pTst);
+DECLASM(void) tstInvlpg0(PTSTMICRO pTst);
+DECLASM(void) tstInvlpgEIP(PTSTMICRO pTst);
+DECLASM(void) tstInvlpgESP(PTSTMICRO pTst);
+DECLASM(void) tstCR3Reload(PTSTMICRO pTst);
+DECLASM(void) tstWPEnable(PTSTMICRO pTst);
+DECLASM(void) tstWPDisable(PTSTMICRO pTst);
+
+
+DECLASM(int) tstPFR0(PTSTMICRO pTst);
+DECLASM(int) tstPFR1(PTSTMICRO pTst);
+DECLASM(int) tstPFR2(PTSTMICRO pTst);
+DECLASM(int) tstPFR3(PTSTMICRO pTst);
+
+
+
+DECLASM(void) tstTrapHandlerNoErr(void);
+DECLASM(void) tstTrapHandler(void);
+DECLASM(void) tstInterrupt42(void);
+
+RT_C_DECLS_END
+
+#endif /* !VMM_INCLUDED_SRC_testcase_tstMicro_h */
diff --git a/src/VBox/VMM/testcase/tstMicro.mac b/src/VBox/VMM/testcase/tstMicro.mac
new file mode 100644
index 00000000..f89ab7bc
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstMicro.mac
@@ -0,0 +1,40 @@
+; $Id: tstMicro.mac $
+;; @file
+; Micro Testcase, profiling special CPU operations.
+;
+
+;
+; Copyright (C) 2006-2020 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+%ifndef __tstMicro_mac__
+%define __tstMicro_mac__
+
+
+struc TSTMICRO
+ .RCPtr resd 1
+ .RCPtrStack resd 1
+ .u64TSCR0Start resq 1
+ .u64TSCRxStart resq 1
+ .u64TSCR0Enter resq 1
+ .u64TSCR0Exit resq 1
+ .u64TSCRxEnd resq 1
+ .u64TSCR0End resq 1
+ .cHits resd 1
+ .offEIPAdd resd 1
+ .u32CR2 resd 1
+ .u32ErrCd resd 1
+ .u32EIP resd 1
+ .OriginalIDTR resb 6
+endstruc
+
+
+%endif
diff --git a/src/VBox/VMM/testcase/tstMicroRC.cpp b/src/VBox/VMM/testcase/tstMicroRC.cpp
new file mode 100644
index 00000000..7540b510
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstMicroRC.cpp
@@ -0,0 +1,258 @@
+/* $Id: tstMicroRC.cpp $ */
+/** @file
+ * Micro Testcase, profiling special CPU operations - GC Code (hacks).
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/selm.h>
+#include "tstMicro.h"
+
+#include <iprt/errcore.h>
+#include <iprt/asm-amd64-x86.h>
+#include <VBox/log.h>
+#include <iprt/assert.h>
+#include <iprt/string.h>
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+RT_C_DECLS_BEGIN
+DECLEXPORT(int) tstMicroRC(PTSTMICRO pTst, unsigned uTestcase);
+RT_C_DECLS_END
+
+
+/**
+ * Save and load our IDT.
+ *
+ * @param pTst Pointer to the instance data.
+ * @param iIDT The index of the IDT entry which should be hooked.
+ */
+void idtInstall(PTSTMICRO pTst, int iIDT)
+{
+ RTIDTR Idtr;
+ ASMGetIDTR(&Idtr);
+ if (Idtr.pIdt == (uintptr_t)&pTst->aIDT[0])
+ return;
+ pTst->OriginalIDTR.cbIdt = Idtr.cbIdt;
+ pTst->OriginalIDTR.pIdt = Idtr.pIdt;
+
+ /*
+ * Copy the IDT.
+ */
+ if (Idtr.cbIdt >= sizeof(pTst->aIDT))
+ Idtr.cbIdt = sizeof(pTst->aIDT) - 1;
+ memcpy(&pTst->aIDT[0], (void *)Idtr.pIdt, Idtr.cbIdt + 1);
+
+
+ /* Hook up IDT entry. */
+ if (iIDT >= 0)
+ {
+ uintptr_t uHandler = (uintptr_t)tstTrapHandlerNoErr;
+ if ( iIDT == 8
+ || iIDT == 0xa
+ || iIDT == 0xb
+ || iIDT == 0xc
+ || iIDT == 0xd
+ || iIDT == 0xe
+ || iIDT == 0x11)
+ uHandler = (uintptr_t)tstTrapHandler;
+ pTst->aIDT[iIDT].Int.u16OffsetHigh = uHandler >> 16;
+ pTst->aIDT[iIDT].Int.u16OffsetLow = uHandler & 0xffff;
+ pTst->aIDT[iIDT].Int.u16SegSel = SELMGetHyperCS(&g_VM);
+ pTst->aIDT[iIDT].Int.u2DPL = 3;
+ pTst->aIDT[iIDT].Int.u1Present = 1;
+ pTst->aIDT[iIDT].Int.u1Fixed0 = 0;
+ pTst->aIDT[iIDT].Int.u1Fixed1 = 0;
+ pTst->aIDT[iIDT].Int.u1Fixed2 = 0;
+ pTst->aIDT[iIDT].Int.u1Fixed3 = 0;
+ pTst->aIDT[iIDT].Int.u1Fixed4 = 1;
+ pTst->aIDT[iIDT].Int.u1Fixed5 = 1;
+ pTst->aIDT[iIDT].Int.u132BitGate = 1;
+ pTst->aIDT[iIDT].Int.u1Fixed6 = 0;
+ pTst->aIDT[iIDT].Int.u5Reserved2 = 0;
+ }
+
+ /* Install int 42h, R3 gate */
+ pTst->aIDT[0x42].Int.u16OffsetHigh = (uintptr_t)tstInterrupt42 >> 16;
+ pTst->aIDT[0x42].Int.u16OffsetLow = (uintptr_t)tstInterrupt42 & 0xffff;
+ pTst->aIDT[0x42].Int.u16SegSel = SELMGetHyperCS(&g_VM);
+ pTst->aIDT[0x42].Int.u2DPL = 3;
+ pTst->aIDT[0x42].Int.u1Present = 1;
+ pTst->aIDT[0x42].Int.u1Fixed0 = 0;
+ pTst->aIDT[0x42].Int.u1Fixed1 = 0;
+ pTst->aIDT[0x42].Int.u1Fixed2 = 0;
+ pTst->aIDT[0x42].Int.u1Fixed3 = 0;
+ pTst->aIDT[0x42].Int.u1Fixed4 = 1;
+ pTst->aIDT[0x42].Int.u1Fixed5 = 1;
+ pTst->aIDT[0x42].Int.u132BitGate = 1;
+ pTst->aIDT[0x42].Int.u1Fixed6 = 0;
+ pTst->aIDT[0x42].Int.u5Reserved2 = 0;
+
+ /*
+ * Load our IDT.
+ */
+ Idtr.pIdt = (uintptr_t)&pTst->aIDT[0];
+ ASMSetIDTR(&Idtr);
+
+ RTIDTR Idtr2;
+ ASMGetIDTR(&Idtr2);
+ Assert(Idtr2.pIdt == (uintptr_t)&pTst->aIDT[0]);
+}
+
+
+/**
+ * Removes all trap overrides except for gate 42.
+ */
+DECLASM(void) idtOnly42(PTSTMICRO pTst)
+{
+ if (pTst->OriginalIDTR.pIdt)
+ memcpy(&pTst->aIDT[0], (void *)(uintptr_t)pTst->OriginalIDTR.pIdt, sizeof(VBOXIDTE) * 32);
+}
+
+
+
+DECLEXPORT(int) tstMicroRC(PTSTMICRO pTst, unsigned uTestcase)
+{
+ RTLogPrintf("pTst=%p uTestcase=%d\n", pTst, uTestcase);
+
+ /*
+ * Validate input.
+ */
+ if (uTestcase >= TSTMICROTEST_MAX)
+ return VERR_INVALID_PARAMETER;
+
+ /*
+ * Clear the results.
+ */
+ pTst->u64TSCR0Start = 0;
+ pTst->u64TSCRxStart = 0;
+ pTst->u64TSCR0Enter = 0;
+ pTst->u64TSCR0Exit = 0;
+ pTst->u64TSCRxEnd = 0;
+ pTst->u64TSCR0End = 0;
+ pTst->cHits = 0;
+ pTst->offEIPAdd = 0;
+ pTst->u32CR2 = 0;
+ pTst->u32EIP = 0;
+ pTst->u32ErrCd = 0;
+ PTSTMICRORESULT pRes = &pTst->aResults[uTestcase];
+ memset(&pTst->aResults[uTestcase], 0, sizeof(pTst->aResults[uTestcase]));
+
+
+ /*
+ * Do the testcase.
+ */
+ int rc = VINF_SUCCESS;
+ switch (uTestcase)
+ {
+ case TSTMICROTEST_OVERHEAD:
+ {
+ tstOverhead(pTst);
+ break;
+ }
+
+ case TSTMICROTEST_INVLPG_0:
+ {
+ tstInvlpg0(pTst);
+ break;
+ }
+
+ case TSTMICROTEST_INVLPG_EIP:
+ {
+ tstInvlpgEIP(pTst);
+ break;
+ }
+
+ case TSTMICROTEST_INVLPG_ESP:
+ {
+ tstInvlpgESP(pTst);
+ break;
+ }
+
+ case TSTMICROTEST_CR3_RELOAD:
+ {
+ tstCR3Reload(pTst);
+ break;
+ }
+
+ case TSTMICROTEST_WP_DISABLE:
+ {
+ tstWPDisable(pTst);
+ break;
+ }
+
+ case TSTMICROTEST_WP_ENABLE:
+ {
+ tstWPEnable(pTst);
+ break;
+ }
+
+ case TSTMICROTEST_PF_R0:
+ {
+ idtInstall(pTst, 0xe);
+ pTst->offEIPAdd = 2;
+ rc = tstPFR0(pTst);
+ break;
+ }
+
+ case TSTMICROTEST_PF_R1:
+ {
+ idtInstall(pTst, 0xe);
+ pTst->offEIPAdd = 2;
+ rc = tstPFR1(pTst);
+ break;
+ }
+
+ case TSTMICROTEST_PF_R2:
+ {
+ idtInstall(pTst, 0xe);
+ pTst->offEIPAdd = 2;
+ rc = tstPFR2(pTst);
+ break;
+ }
+
+ case TSTMICROTEST_PF_R3:
+ {
+ idtInstall(pTst, 0xe);
+ pTst->offEIPAdd = 2;
+ rc = tstPFR3(pTst);
+ break;
+ }
+
+ }
+
+ /*
+ * Compute the results.
+ */
+ if (pTst->u64TSCR0End && pTst->u64TSCR0Start)
+ pRes->cTotalTicks = pTst->u64TSCR0End - pTst->u64TSCR0Start - pTst->u64Overhead;
+ if (pTst->u64TSCRxStart && pTst->u64TSCR0Start)
+ pRes->cToRxFirstTicks = pTst->u64TSCRxStart - pTst->u64TSCR0Start - pTst->u64Overhead;
+ if (pTst->u64TSCR0Enter && pTst->u64TSCRxStart)
+ pRes->cTrapTicks = pTst->u64TSCR0Enter - pTst->u64TSCRxStart - pTst->u64Overhead;
+ if (pTst->u64TSCRxEnd && pTst->u64TSCR0Exit)
+ pRes->cToRxTrapTicks = pTst->u64TSCRxEnd - pTst->u64TSCR0Exit - pTst->u64Overhead;
+ if (pTst->u64TSCR0End && pTst->u64TSCRxEnd)
+ pRes->cToR0Ticks = pTst->u64TSCR0End - pTst->u64TSCRxEnd - pTst->u64Overhead;
+
+ return rc;
+}
+
diff --git a/src/VBox/VMM/testcase/tstMicroRC.def b/src/VBox/VMM/testcase/tstMicroRC.def
new file mode 100644
index 00000000..11b73ce9
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstMicroRC.def
@@ -0,0 +1,28 @@
+;; @file
+;
+; VMM Guest Context Micro Benchmark - Definition file.
+
+;
+; Copyright (C) 2006-2020 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+LIBRARY tstMicroRC.gc
+EXPORTS
+ ; data
+
+ ; code
+ tstMicroRCAsmStart
+ tstMicroRCAsmEnd
+ tstPFR1
+ tstPFR2
+ tstPFR3
+ tstTrapHandlerNoErr
+
diff --git a/src/VBox/VMM/testcase/tstMicroRCA.asm b/src/VBox/VMM/testcase/tstMicroRCA.asm
new file mode 100644
index 00000000..19c9a9b7
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstMicroRCA.asm
@@ -0,0 +1,558 @@
+; $Id: tstMicroRCA.asm $
+;; @file
+; tstMicroRCA
+;
+
+;
+; Copyright (C) 2006-2020 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "VBox/asmdefs.mac"
+%include "iprt/x86.mac"
+%include "VBox/vmm/cpum.mac"
+%include "VBox/err.mac"
+%include "VBox/vmm/vm.mac"
+%include "tstMicro.mac"
+
+
+;*******************************************************************************
+;* Defined Constants And Macros *
+;*******************************************************************************
+;;
+; Function prolog which saves everything and loads the first parameter into ebx.
+%macro PROLOG 0
+ push ebp
+ mov ebp, esp
+ push ebx
+ push esi
+ push edi
+ mov ebx, [ebp + 8] ; pTst
+%endm
+
+;;
+; Function epilog which restores the registers saved by PROLOG.
+%macro EPILOG 0
+ pop edi
+ pop esi
+ pop ebx
+ leave
+%endm
+
+;;
+; Does an rdtsc (trashing edx:eax) and move the result to edi:esi.
+%macro RDTSC_EDI_ESI 0
+ rdtsc
+ xchg eax, esi
+ xchg edx, edi
+%endm
+
+;;
+; Does an rdtsc (trashing edx:eax) and move the result to ecx:ebp.
+%macro RDTSC_ECX_EBP 0
+ rdtsc
+ xchg eax, ebp
+ xchg edx, ecx
+%endm
+
+;;
+; Saves the result of an instruction profiling operation.
+;
+; Input is in edi:esi (start) and [ebp + 8] points to TSTMICRO.
+; Trashes ebx.
+%macro STORE_INSTR_PRF_RESULT 0
+ mov ebx, [ebp + 8]
+ mov [ebx + TSTMICRO.u64TSCR0Start ], esi
+ mov [ebx + TSTMICRO.u64TSCR0Start + 4], edi
+ mov [ebx + TSTMICRO.u64TSCR0End ], eax
+ mov [ebx + TSTMICRO.u64TSCR0End + 4], edx
+%endm
+
+;;
+; Samples the end time of an instruction profiling operation and
+; Saves the result of an instruction profiling operation.
+;
+; Input is in edi:esi (start) and [ebp + 8] points to TSTMICRO.
+; Trashes ebx.
+%macro RDTSC_STORE_INSTR_PRF_RESULT 0
+ rdtsc
+ STORE_INSTR_PRF_RESULT
+%endm
+
+
+;;
+; copies the stack to gabStackCopy and saves ESP, EBP and the resume EIP in gESPResume, gEBPResume and gEIPResume.
+;
+; @param %1 The resume label.
+; @param ebx TSTMICRO pointer.
+; @uses ecx, edi, esi, flags.
+%macro COPY_STACK_ESP_EBP_RESUME 1
+ mov [gpTst], ebx
+ mov [gESPResume], esp
+ mov [gEBPResume], ebp
+ mov dword [gEIPResume], %1
+
+ mov esi, esp
+ and esi, ~0fffh
+ mov edi, gabStackCopy
+ mov ecx, 01000h / 4
+ rep movsd
+
+%endm
+
+
+;*******************************************************************************
+;* Global Variables *
+;*******************************************************************************
+BEGINDATA
+gpTst dd 0
+
+gESPResume dd 0
+gEBPResume dd 0
+gEIPResume dd 0
+
+BEGINBSS
+gabStackCopy resb 4096
+
+extern NAME(idtOnly42)
+extern IMPNAME(g_VM)
+
+BEGINCODE
+EXPORTEDNAME tstMicroRCAsmStart
+
+
+;;
+; Check the overhead of doing rdtsc + two xchg operations.
+;
+BEGINPROC tstOverhead
+ PROLOG
+
+ RDTSC_EDI_ESI
+ RDTSC_STORE_INSTR_PRF_RESULT
+
+ EPILOG
+ ret
+ENDPROC tstOverhead
+
+
+;;
+; Invalidate page 0.
+;
+BEGINPROC tstInvlpg0
+ PROLOG
+
+ RDTSC_EDI_ESI
+ invlpg [0]
+ RDTSC_STORE_INSTR_PRF_RESULT
+
+ EPILOG
+ ret
+ENDPROC tstInvlpg0
+
+;;
+; Invalidate the current code page.
+;
+BEGINPROC tstInvlpgEIP
+ PROLOG
+
+ RDTSC_EDI_ESI
+ invlpg [NAME(tstInvlpgEIP)]
+ RDTSC_STORE_INSTR_PRF_RESULT
+
+ EPILOG
+ ret
+ENDPROC tstInvlpgEIP
+
+
+;;
+; Invalidate the page containing the current stack pointer (esp).
+;
+BEGINPROC tstInvlpgESP
+ PROLOG
+
+ RDTSC_EDI_ESI
+ invlpg [esp]
+ RDTSC_STORE_INSTR_PRF_RESULT
+
+ EPILOG
+ ret
+ENDPROC tstInvlpgESP
+
+
+;;
+; cr3 reload sequence.
+;
+BEGINPROC tstCR3Reload
+ PROLOG
+
+ RDTSC_EDI_ESI
+ mov ebx, cr3
+ mov cr3, ebx
+ RDTSC_STORE_INSTR_PRF_RESULT
+
+ EPILOG
+ ret
+ENDPROC tstCR3Reload
+
+
+;;
+; Enable WP sequence.
+;
+BEGINPROC tstWPEnable
+ PROLOG
+
+ RDTSC_EDI_ESI
+ mov ebx, cr0
+ or ebx, X86_CR0_WRITE_PROTECT
+ mov cr0, ebx
+ rdtsc
+ ; disable it now or we'll die...
+ and ebx, ~X86_CR0_WRITE_PROTECT
+ mov cr0, ebx
+ STORE_INSTR_PRF_RESULT
+
+ EPILOG
+ ret
+ENDPROC tstWPEnable
+
+
+;;
+; Disable WP sequence.
+;
+BEGINPROC tstWPDisable
+ PROLOG
+
+ ;
+ mov ebx, cr0
+ or ebx, X86_CR0_WRITE_PROTECT
+ mov cr0, ebx
+ ; just waste a bit of space and time to try to avoid the enable bit tainting the results of the disable.
+ xor ebx, ebx
+ rdtsc
+ add ebx, eax
+ rdtsc
+ add ebx, edx
+ rdtsc
+ sub ebx, eax
+
+ RDTSC_EDI_ESI
+ mov ebx, cr0
+ and ebx, ~X86_CR0_WRITE_PROTECT
+ mov cr0, ebx
+ RDTSC_STORE_INSTR_PRF_RESULT
+
+ EPILOG
+ ret
+ENDPROC tstWPDisable
+
+
+
+
+;;
+; Generate a #PF accessing page 0 in ring-0.
+;
+BEGINPROC tstPFR0
+ PROLOG
+
+ COPY_STACK_ESP_EBP_RESUME tstPFR0_Resume
+
+ rdtsc
+ mov [ebx + TSTMICRO.u64TSCR0Start ], eax
+ mov [ebx + TSTMICRO.u64TSCR0Start + 4], edx
+ xor ebx, ebx ; The NULL pointer.
+ xor ecx, ecx
+ xor ebp, ebp ; ebp:ecx - Rx enter time (0:0).
+ RDTSC_EDI_ESI ; edi:esi - Before trap.
+ mov [ebx], ebx ; traps - 2 bytes
+
+ RDTSC_EDI_ESI ; edi:esi - Rx entry time.
+ int 42h ; we're done.
+
+tstPFR0_Resume:
+ EPILOG
+ ret
+ENDPROC tstPFR0
+
+
+
+;;
+; Generate a #PF accessing page 0 in ring-1
+;
+BEGINPROC_EXPORTED tstPFR1
+ PROLOG
+
+ COPY_STACK_ESP_EBP_RESUME tstPFR1_Resume
+
+ ; Setup iret to execute r1 code.
+ mov eax, 02069h ; load ds and es with R1 selectors.
+ mov es, eax
+ mov ds, eax
+ push dword 01069h ; ss
+ push dword [ebx + TSTMICRO.RCPtrStack] ; esp
+ push dword 0000h ; eflags
+ push dword 01061h ; cs
+ push tstPTR1_R1 ; eip
+
+ rdtsc
+ mov [ebx + TSTMICRO.u64TSCR0Start ], eax
+ mov [ebx + TSTMICRO.u64TSCR0Start + 4], edx
+ iret
+
+ ; R1 code
+tstPTR1_R1:
+ RDTSC_ECX_EBP ; ebp:ecx - Rx enter time (0:0).
+ xor ebx, ebx
+ RDTSC_EDI_ESI ; edi:esi - Before trap.
+ mov [ebx], ebx ; traps - 2 bytes
+
+ RDTSC_EDI_ESI ; edi:esi - Rx entry time.
+ int 42h ; we're done.
+
+ ; Resume in R0
+tstPFR1_Resume:
+ EPILOG
+ ret
+ENDPROC tstPFR1
+
+
+;;
+; Generate a #PF accessing page 0 in ring-2
+;
+BEGINPROC_EXPORTED tstPFR2
+ PROLOG
+
+ COPY_STACK_ESP_EBP_RESUME tstPFR2_Resume
+
+ ; Setup iret to execute r2 code.
+ mov eax, 0206ah ; load ds and es with R2 selectors.
+ mov es, eax
+ mov ds, eax
+ push 0206ah ; ss
+ push dword [ebx + TSTMICRO.RCPtrStack] ; esp
+ push dword 0000h ; eflags
+ push 02062h ; cs
+ push tstPTR2_R2 ; eip
+
+ rdtsc
+ mov [ebx + TSTMICRO.u64TSCR0Start ], eax
+ mov [ebx + TSTMICRO.u64TSCR0Start + 4], edx
+ iret
+
+ ; R2 code
+tstPTR2_R2:
+ RDTSC_ECX_EBP ; ebp:ecx - Rx enter time (0:0).
+ xor ebx, ebx
+ RDTSC_EDI_ESI ; edi:esi - Before trap.
+ mov [ebx], ebx ; traps - 2 bytes
+
+ RDTSC_EDI_ESI ; edi:esi - Rx entry time.
+ int 42h ; we're done.
+
+ ; Resume in R0
+tstPFR2_Resume:
+ EPILOG
+ ret
+ENDPROC tstPFR2
+
+
+;;
+; Generate a #PF accessing page 0 in ring-3
+;
+BEGINPROC_EXPORTED tstPFR3
+ PROLOG
+
+ COPY_STACK_ESP_EBP_RESUME tstPFR3_Resume
+
+ ; Setup iret to execute r3 code.
+ mov eax, 0306bh ; load ds and es with R3 selectors.
+ mov es, eax
+ mov ds, eax
+ push 0306bh ; ss
+ push dword [ebx + TSTMICRO.RCPtrStack] ; esp
+ push dword 0000h ; eflags
+ push 03063h ; cs
+ push tstPTR3_R3 ; eip
+
+ rdtsc
+ mov [ebx + TSTMICRO.u64TSCR0Start ], eax
+ mov [ebx + TSTMICRO.u64TSCR0Start + 4], edx
+ iret
+
+ ; R3 code
+tstPTR3_R3:
+ RDTSC_ECX_EBP ; ebp:ecx - Rx enter time (0:0).
+ xor ebx, ebx
+ RDTSC_EDI_ESI ; edi:esi - Before trap.
+ mov [ebx], ebx ; traps - 2 bytes
+
+ RDTSC_EDI_ESI ; edi:esi - Rx entry time.
+ int 42h ; we're done.
+
+ ; Resume in R0
+tstPFR3_Resume:
+ EPILOG
+ ret
+ENDPROC tstPFR3
+
+
+
+;;
+; Trap handler without error code - shares code with tstTrapHandler.
+align 8
+BEGINPROC_EXPORTED tstTrapHandlerNoErr
+ rdtsc
+ push 0ffffffffh
+ jmp tstTrapHandler_Common
+
+;;
+; Trap handler with error code.
+; 14 SS (only if ring transition.)
+; 10 ESP (only if ring transition.)
+; c EFLAGS
+; 8 CS
+; 4 EIP
+; 0 Error code. (~0 for vectors which don't take an error code.)
+;; @todo This is a bit of a mess - clean up!
+align 8
+BEGINPROC tstTrapHandler
+ ; get the time
+ rdtsc
+
+tstTrapHandler_Common:
+ xchg ecx, eax
+ mov eax, -1 ; return code
+
+ ; disable WP
+ mov ebx, cr0
+ and ebx, ~X86_CR0_WRITE_PROTECT
+ mov cr0, ebx
+
+ ; first hit, or final hit?
+ mov ebx, [gpTst]
+ inc dword [ebx + TSTMICRO.cHits]
+ cmp dword [ebx + TSTMICRO.cHits], byte 1
+ jne near tstTrapHandler_Fault
+
+ ; save the results - edx:ecx == r0 enter time, edi:esi == before trap, ecx:ebp == Rx enter time.
+
+ mov [ebx + TSTMICRO.u64TSCR0Enter ], ecx
+ mov [ebx + TSTMICRO.u64TSCR0Enter + 4], edx
+
+ ;mov [ebx + TSTMICRO.u64TSCRxStart ], ecx
+ ;mov [ebx + TSTMICRO.u64TSCRxStart + 4], ebp
+
+ mov [ebx + TSTMICRO.u64TSCRxStart ], esi
+ mov [ebx + TSTMICRO.u64TSCRxStart + 4], edi
+
+ mov eax, cr2
+ mov [ebx + TSTMICRO.u32CR2], eax
+ mov eax, [esp + 0]
+ mov [ebx + TSTMICRO.u32ErrCd], eax
+ mov eax, [esp + 4]
+ mov [ebx + TSTMICRO.u32EIP], eax
+
+ ;
+ ; Advance the EIP and resume.
+ ;
+ mov ecx, [ebx + TSTMICRO.offEIPAdd]
+ add [esp + 4], ecx ; return eip + offEIPAdd
+
+ add esp, byte 4 ; skip the err code
+
+ ; take the timestamp before resuming.
+ rdtsc
+ mov [ebx + TSTMICRO.u64TSCR0Exit ], eax
+ mov [ebx + TSTMICRO.u64TSCR0Exit + 4], edx
+ iret
+
+
+tstTrapHandler_Fault:
+ cld
+
+%if 0 ; this has been broken for quite some time
+ ;
+ ; Setup CPUMCTXCORE frame
+ ;
+ push dword [esp + 4h + 0h] ; 3ch - eip
+ push dword [esp + 0ch + 4h] ; 38h - eflags
+ ;;;;push dword [esp + 08h + 8h] ; 34h - cs
+ push cs;want disasm
+ push ds ; c ; 30h
+ push es ;10 ; 2ch
+ push fs ;14 ; 28h
+ push gs ;18 ; 24h
+ push dword [esp + 14h + 1ch] ; 20h - ss
+ push dword [esp + 10h + 20h] ; 1ch - esp
+ push ecx ;24 ; 18h
+ push edx ;28 ; 14h
+ push ebx ;2c ; 10h
+ push eax ;30 ; ch
+ push ebp ;34 ; 8h
+ push esi ;38 ; 4h
+ push edi ;3c ; 0h
+ ;40
+%endif
+
+ test byte [esp + 0ch + 4h], 3h ; check CPL of the cs selector
+ jmp short tstTrapHandler_Fault_Hyper ;; @todo
+ jz short tstTrapHandler_Fault_Hyper
+tstTrapHandler_Fault_Guest:
+ mov ecx, esp
+ mov edx, IMP(g_VM)
+ mov eax, VERR_TRPM_DONT_PANIC
+ call [edx + VM.pfnVMMRCToHostAsm]
+ jmp short tstTrapHandler_Fault_Guest
+
+tstTrapHandler_Fault_Hyper:
+ ; fix ss:esp.
+ lea ebx, [esp + 14h + 040h] ; calc esp at trap
+ mov [esp + CPUMCTXCORE.esp], ebx; update esp in register frame
+ mov [esp + CPUMCTXCORE.ss.Sel], ss ; update ss in register frame
+
+ mov ecx, esp
+ mov edx, IMP(g_VM)
+ mov eax, VERR_TRPM_DONT_PANIC
+ call [edx + VM.pfnVMMRCToHostAsm]
+ jmp short tstTrapHandler_Fault_Hyper
+
+BEGINPROC tstInterrupt42
+ rdtsc
+ push byte 0
+ mov ecx, eax ; low ts
+ xor eax, eax ; return code.
+
+ ; save the results - edx:ecx == r0 end time, edi:esi == Rx end time.
+ mov [ebx + TSTMICRO.u64TSCR0End ], ecx
+ mov [ebx + TSTMICRO.u64TSCR0End + 4], edx
+
+ mov [ebx + TSTMICRO.u64TSCRxEnd ], esi
+ mov [ebx + TSTMICRO.u64TSCRxEnd + 4], edi
+
+ ;
+ ; Restore the IDT and stack, and resume the testcase code.
+ ;
+ lidt [ebx + TSTMICRO.OriginalIDTR]
+
+ mov edi, esp
+ and edi, ~0fffh
+ mov esi, gabStackCopy
+ mov ecx, 01000h / 4
+ mov esp, [gESPResume]
+ mov ebp, [gEBPResume]
+ rep movsd
+
+ jmp [gEIPResume]
+
+ENDPROC tstTrapHandler
+
+EXPORTEDNAME tstMicroRCAsmEnd
diff --git a/src/VBox/VMM/testcase/tstPDMAsyncCompletion.cpp b/src/VBox/VMM/testcase/tstPDMAsyncCompletion.cpp
new file mode 100644
index 00000000..4ef4a33f
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstPDMAsyncCompletion.cpp
@@ -0,0 +1,274 @@
+/* $Id: tstPDMAsyncCompletion.cpp $ */
+/** @file
+ * PDM Asynchronous Completion Testcase.
+ *
+ * This testcase is for testing the async completion interface.
+ * It implements a file copy program which uses the interface to copy the data.
+ *
+ * Use: ./tstPDMAsyncCompletion <source> <destination>
+ */
+
+/*
+ * Copyright (C) 2008-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PDM_ASYNC_COMPLETION
+
+#include "VMInternal.h" /* UVM */
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/vmm/pdmasynccompletion.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/cpum.h>
+#include <iprt/errcore.h>
+#include <VBox/log.h>
+#include <VBox/vmm/pdmapi.h>
+#include <iprt/alloc.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/file.h>
+#include <iprt/initterm.h>
+#include <iprt/semaphore.h>
+#include <iprt/stream.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+
+#define TESTCASE "tstPDMAsyncCompletion"
+
+/*
+ * Number of simultaneous active tasks.
+ */
+#define NR_TASKS 80
+#define BUFFER_SIZE (64*_1K)
+
+/* Buffers to store data in. */
+uint8_t *g_AsyncCompletionTasksBuffer[NR_TASKS];
+PPDMASYNCCOMPLETIONTASK g_AsyncCompletionTasks[NR_TASKS];
+volatile uint32_t g_cTasksLeft;
+RTSEMEVENT g_FinishedEventSem;
+
+static DECLCALLBACK(void) AsyncTaskCompleted(PVM pVM, void *pvUser, void *pvUser2, int rc)
+{
+ RT_NOREF4(pVM, pvUser, pvUser2, rc);
+ LogFlow((TESTCASE ": %s: pVM=%p pvUser=%p pvUser2=%p\n", __FUNCTION__, pVM, pvUser, pvUser2));
+
+ uint32_t cTasksStillLeft = ASMAtomicDecU32(&g_cTasksLeft);
+
+ if (!cTasksStillLeft)
+ {
+ /* All tasks processed. Wakeup main. */
+ RTSemEventSignal(g_FinishedEventSem);
+ }
+}
+
+/**
+ * Entry point.
+ */
+extern "C" DECLEXPORT(int) TrustedMain(int argc, char **argv, char **envp)
+{
+ RT_NOREF1(envp);
+ int rcRet = 0; /* error count */
+ PPDMASYNCCOMPLETIONENDPOINT pEndpointSrc, pEndpointDst;
+
+ RTR3InitExe(argc, &argv, RTR3INIT_FLAGS_SUPLIB);
+
+ if (argc != 3)
+ {
+ RTPrintf(TESTCASE ": Usage is ./tstPDMAsyncCompletion <source> <dest>\n");
+ return 1;
+ }
+
+ PVM pVM;
+ PUVM pUVM;
+ int rc = VMR3Create(1, NULL, NULL, NULL, NULL, NULL, &pVM, &pUVM);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Little hack to avoid the VM_ASSERT_EMT assertion.
+ */
+ RTTlsSet(pVM->pUVM->vm.s.idxTLS, &pVM->pUVM->aCpus[0]);
+ pVM->pUVM->aCpus[0].pUVM = pVM->pUVM;
+ pVM->pUVM->aCpus[0].vm.s.NativeThreadEMT = RTThreadNativeSelf();
+
+ /*
+ * Create the template.
+ */
+ PPDMASYNCCOMPLETIONTEMPLATE pTemplate;
+ rc = PDMR3AsyncCompletionTemplateCreateInternal(pVM, &pTemplate, AsyncTaskCompleted, NULL, "Test");
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf(TESTCASE ": Error while creating the template!! rc=%d\n", rc);
+ return 1;
+ }
+
+ /*
+ * Create event semaphore.
+ */
+ rc = RTSemEventCreate(&g_FinishedEventSem);
+ AssertRC(rc);
+
+ /*
+ * Create the temporary buffers.
+ */
+ for (unsigned i=0; i < NR_TASKS; i++)
+ {
+ g_AsyncCompletionTasksBuffer[i] = (uint8_t *)RTMemAllocZ(BUFFER_SIZE);
+ if (!g_AsyncCompletionTasksBuffer[i])
+ {
+ RTPrintf(TESTCASE ": out of memory!\n");
+ return ++rcRet;
+ }
+ }
+
+ /* Create the destination as the async completion API can't do this. */
+ RTFILE FileTmp;
+ rc = RTFileOpen(&FileTmp, argv[2], RTFILE_O_READWRITE | RTFILE_O_OPEN_CREATE | RTFILE_O_DENY_NONE);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf(TESTCASE ": Error while creating the destination!! rc=%d\n", rc);
+ return ++rcRet;
+ }
+ RTFileClose(FileTmp);
+
+ /* Create our file endpoint */
+ rc = PDMR3AsyncCompletionEpCreateForFile(&pEndpointSrc, argv[1], 0, pTemplate);
+ if (RT_SUCCESS(rc))
+ {
+ rc = PDMR3AsyncCompletionEpCreateForFile(&pEndpointDst, argv[2], 0, pTemplate);
+ if (RT_SUCCESS(rc))
+ {
+ PDMR3PowerOn(pVM);
+
+ /* Wait for all threads to finish initialization. */
+ RTThreadSleep(100);
+
+ int fReadPass = true;
+ uint64_t cbSrc;
+ size_t offSrc = 0;
+ size_t offDst = 0;
+ uint32_t cTasksUsed = 0;
+
+ rc = PDMR3AsyncCompletionEpGetSize(pEndpointSrc, &cbSrc);
+ if (RT_SUCCESS(rc))
+ {
+ /* Copy the data. */
+ for (;;)
+ {
+ if (fReadPass)
+ {
+ cTasksUsed = (BUFFER_SIZE * NR_TASKS) <= (cbSrc - offSrc)
+ ? NR_TASKS
+ : ((cbSrc - offSrc) / BUFFER_SIZE)
+ + ((cbSrc - offSrc) % BUFFER_SIZE) > 0
+ ? 1
+ : 0;
+
+ g_cTasksLeft = cTasksUsed;
+
+ for (uint32_t i = 0; i < cTasksUsed; i++)
+ {
+ size_t cbRead = ((size_t)offSrc + BUFFER_SIZE) <= cbSrc ? BUFFER_SIZE : cbSrc - offSrc;
+ RTSGSEG DataSeg;
+
+ DataSeg.pvSeg = g_AsyncCompletionTasksBuffer[i];
+ DataSeg.cbSeg = cbRead;
+
+ rc = PDMR3AsyncCompletionEpRead(pEndpointSrc, offSrc, &DataSeg, 1, cbRead, NULL,
+ &g_AsyncCompletionTasks[i]);
+ AssertRC(rc);
+ offSrc += cbRead;
+ if (offSrc == cbSrc)
+ break;
+ }
+ }
+ else
+ {
+ g_cTasksLeft = cTasksUsed;
+
+ for (uint32_t i = 0; i < cTasksUsed; i++)
+ {
+ size_t cbWrite = (offDst + BUFFER_SIZE) <= cbSrc ? BUFFER_SIZE : cbSrc - offDst;
+ RTSGSEG DataSeg;
+
+ DataSeg.pvSeg = g_AsyncCompletionTasksBuffer[i];
+ DataSeg.cbSeg = cbWrite;
+
+ rc = PDMR3AsyncCompletionEpWrite(pEndpointDst, offDst, &DataSeg, 1, cbWrite, NULL,
+ &g_AsyncCompletionTasks[i]);
+ AssertRC(rc);
+ offDst += cbWrite;
+ if (offDst == cbSrc)
+ break;
+ }
+ }
+
+ rc = RTSemEventWait(g_FinishedEventSem, RT_INDEFINITE_WAIT);
+ AssertRC(rc);
+
+ if (!fReadPass && (offDst == cbSrc))
+ break;
+ else if (fReadPass)
+ fReadPass = false;
+ else
+ {
+ cTasksUsed = 0;
+ fReadPass = true;
+ }
+ }
+ }
+ else
+ {
+ RTPrintf(TESTCASE ": Error querying size of the endpoint!! rc=%d\n", rc);
+ rcRet++;
+ }
+
+ PDMR3PowerOff(pVM);
+ PDMR3AsyncCompletionEpClose(pEndpointDst);
+ }
+ PDMR3AsyncCompletionEpClose(pEndpointSrc);
+ }
+
+ rc = VMR3Destroy(pUVM);
+ AssertMsg(rc == VINF_SUCCESS, ("%s: Destroying VM failed rc=%Rrc!!\n", __FUNCTION__, rc));
+ VMR3ReleaseUVM(pUVM);
+
+ /*
+ * Clean up.
+ */
+ for (uint32_t i = 0; i < NR_TASKS; i++)
+ {
+ RTMemFree(g_AsyncCompletionTasksBuffer[i]);
+ }
+ }
+ else
+ {
+ RTPrintf(TESTCASE ": failed to create VM!! rc=%Rrc\n", rc);
+ rcRet++;
+ }
+
+ return rcRet;
+}
+
+
+#if !defined(VBOX_WITH_HARDENING) || !defined(RT_OS_WINDOWS)
+/**
+ * Main entry point.
+ */
+int main(int argc, char **argv, char **envp)
+{
+ return TrustedMain(argc, argv, envp);
+}
+#endif
+
diff --git a/src/VBox/VMM/testcase/tstPDMAsyncCompletionStress.cpp b/src/VBox/VMM/testcase/tstPDMAsyncCompletionStress.cpp
new file mode 100644
index 00000000..3233d33b
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstPDMAsyncCompletionStress.cpp
@@ -0,0 +1,648 @@
+/* $Id: tstPDMAsyncCompletionStress.cpp $ */
+/** @file
+ * PDM Asynchronous Completion Stresstest.
+ *
+ * This testcase is for stress testing the async completion interface.
+ */
+
+/*
+ * Copyright (C) 2008-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define LOG_GROUP LOG_GROUP_PDM_ASYNC_COMPLETION
+
+#include "VMInternal.h" /* UVM */
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/vmm/pdmasynccompletion.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+#include <VBox/vmm/pdmapi.h>
+#include <VBox/vmm/pdmthread.h>
+#include <iprt/alloc.h>
+#include <iprt/asm.h>
+#include <iprt/assert.h>
+#include <iprt/file.h>
+#include <iprt/initterm.h>
+#include <iprt/semaphore.h>
+#include <iprt/rand.h>
+#include <iprt/string.h>
+#include <iprt/path.h>
+#include <iprt/stream.h>
+#include <iprt/thread.h>
+#include <iprt/param.h>
+#include <iprt/message.h>
+
+#define TESTCASE "tstPDMAsyncCompletionStress"
+
+#if 0
+/** Number of simultaneous open endpoints for reading and writing. */
+#define NR_OPEN_ENDPOINTS 10
+/** Test pattern size. */
+#define TEST_PATTERN_SIZE (100*_1M)
+/** Minimum file size. */
+#define FILE_SIZE_MIN (100 * _1M)
+/** Maximum file size. */
+#define FILE_SIZE_MAX (10000UL * _1M)
+/** Minimum segment size. */
+#define SEGMENT_SIZE_MIN (512)
+/** Maximum segment size. */
+#define SEGMENT_SIZE_MAX (TEST_PATTERN_SIZE)
+/** Maximum number of active tasks. */
+#define TASK_ACTIVE_MAX (1024)
+/** Maximum size of a transfer. */
+#define TASK_TRANSFER_SIZE_MAX (10*_1M)
+#else
+/** Number of simultaneous open endpoints for reading and writing. */
+#define NR_OPEN_ENDPOINTS 5
+/** Test pattern size. */
+#define TEST_PATTERN_SIZE (10*_1M)
+/** Minimum file size. */
+#define FILE_SIZE_MIN (100 * _1M)
+/** Maximum file size. */
+#define FILE_SIZE_MAX (1000UL * _1M)
+/** Minimum segment size. */
+#define SEGMENT_SIZE_MIN (512)
+/** Maximum segment size. */
+#define SEGMENT_SIZE_MAX (TEST_PATTERN_SIZE)
+/** Maximum number of active tasks. */
+#define TASK_ACTIVE_MAX (1)
+/** Maximum size of a transfer. */
+#define TASK_TRANSFER_SIZE_MAX (_1M)
+#endif
+
+/**
+ * Structure defining a file segment.
+ */
+typedef struct PDMACTESTFILESEG
+{
+ /** Start offset in the file. */
+ RTFOFF off;
+ /** Size of the segment. */
+ size_t cbSegment;
+ /** Pointer to the start of the data in the test pattern used for the segment. */
+ uint8_t *pbData;
+} PDMACTESTFILESEG, *PPDMACTESTFILESEG;
+
+/**
+ * Structure defining a I/O task.
+ */
+typedef struct PDMACTESTFILETASK
+{
+ /** Flag whether the task is currently active. */
+ bool fActive;
+ /** Flag whether this is a write. */
+ bool fWrite;
+ /** Start offset. */
+ RTFOFF off;
+ /** Data segment */
+ RTSGSEG DataSeg;
+ /** Task handle. */
+ PPDMASYNCCOMPLETIONTASK hTask;
+} PDMACTESTFILETASK, *PPDMACTESTFILETASK;
+
+/**
+ * Structure defining a test file.
+ */
+typedef struct PDMACTESTFILE
+{
+ /** The PDM async completion endpoint handle. */
+ PPDMASYNCCOMPLETIONENDPOINT hEndpoint;
+ /** Template used for this file. */
+ PPDMASYNCCOMPLETIONTEMPLATE pTemplate;
+ /** Maximum size of the file. */
+ uint64_t cbFileMax;
+ /** Current size of the file. */
+ uint64_t cbFileCurr;
+ /** Size of a file segment. */
+ size_t cbFileSegment;
+ /** Maximum number of segments. */
+ size_t cSegments;
+ /** Pointer to the array describing how the file is assembled
+ * of the test pattern. Used for comparing read data to ensure
+ * that no corruption occurred.
+ */
+ PPDMACTESTFILESEG paSegs;
+ /** Maximum number of active tasks for this endpoint. */
+ uint32_t cTasksActiveMax;
+ /** Number of current active tasks. */
+ volatile uint32_t cTasksActiveCurr;
+ /** Pointer to the array of task. */
+ PPDMACTESTFILETASK paTasks;
+ /** I/O thread handle. */
+ PPDMTHREAD hThread;
+ /** Flag whether the thread should terminate. */
+ bool fRunning;
+} PDMACTESTFILE, *PPDMACTESTFILE;
+
+/** Buffer storing the random test pattern. */
+uint8_t *g_pbTestPattern = NULL;
+/** Size of the test pattern. */
+size_t g_cbTestPattern;
+/** Array holding test files. */
+PDMACTESTFILE g_aTestFiles[NR_OPEN_ENDPOINTS];
+
+static DECLCALLBACK(void) tstPDMACStressTestFileTaskCompleted(PVM pVM, void *pvUser, void *pvUser2, int rcReq);
+
+static void tstPDMACStressTestFileVerify(PPDMACTESTFILE pTestFile, PPDMACTESTFILETASK pTestTask)
+{
+ size_t cbLeft = pTestTask->DataSeg.cbSeg;
+ RTFOFF off = pTestTask->off;
+ uint8_t *pbBuf = (uint8_t *)pTestTask->DataSeg.pvSeg;
+
+ while (cbLeft)
+ {
+ size_t cbCompare;
+ size_t iSeg = off / pTestFile->cbFileSegment;
+ PPDMACTESTFILESEG pSeg = &pTestFile->paSegs[iSeg];
+ uint8_t *pbTestPattern;
+ unsigned offSeg = off - pSeg->off;
+
+ cbCompare = RT_MIN(cbLeft, pSeg->cbSegment - offSeg);
+ pbTestPattern = pSeg->pbData + offSeg;
+
+ if (memcmp(pbBuf, pbTestPattern, cbCompare))
+ {
+ unsigned idx = 0;
+
+ while ( (idx < cbCompare)
+ && (pbBuf[idx] == pbTestPattern[idx]))
+ idx++;
+
+ RTMsgError("Unexpected data for off=%RTfoff size=%u\n"
+ "Expected %c got %c\n",
+ pTestTask->off + idx, pTestTask->DataSeg.cbSeg,
+ pbTestPattern[idx], pbBuf[idx]);
+ RTAssertDebugBreak();
+ }
+
+ pbBuf += cbCompare;
+ off += cbCompare;
+ cbLeft -= cbCompare;
+ }
+}
+
+static void tstPDMACStressTestFileFillBuffer(PPDMACTESTFILE pTestFile, PPDMACTESTFILETASK pTestTask)
+{
+ uint8_t *pbBuf = (uint8_t *)pTestTask->DataSeg.pvSeg;
+ size_t cbLeft = pTestTask->DataSeg.cbSeg;
+ RTFOFF off = pTestTask->off;
+
+ Assert(pTestTask->fWrite && pTestTask->fActive);
+
+ while (cbLeft)
+ {
+ size_t cbFill;
+ size_t iSeg = off / pTestFile->cbFileSegment;
+ PPDMACTESTFILESEG pSeg = &pTestFile->paSegs[iSeg];
+ uint8_t *pbTestPattern;
+ unsigned offSeg = off - pSeg->off;
+
+ cbFill = RT_MIN(cbLeft, pSeg->cbSegment - offSeg);
+ pbTestPattern = pSeg->pbData + offSeg;
+
+ memcpy(pbBuf, pbTestPattern, cbFill);
+
+ pbBuf += cbFill;
+ off += cbFill;
+ cbLeft -= cbFill;
+ }
+}
+
+static int tstPDMACStressTestFileWrite(PPDMACTESTFILE pTestFile, PPDMACTESTFILETASK pTestTask)
+{
+ int rc = VINF_SUCCESS;
+
+ Assert(!pTestTask->fActive);
+
+ pTestTask->fActive = true;
+ pTestTask->fWrite = true;
+ pTestTask->DataSeg.cbSeg = RTRandU32Ex(512, TASK_TRANSFER_SIZE_MAX) & ~511;
+
+ uint64_t offMax;
+
+ /* Did we reach the maximum file size? */
+ if (pTestFile->cbFileCurr < pTestFile->cbFileMax)
+ {
+ offMax = (pTestFile->cbFileMax - pTestFile->cbFileCurr) < pTestTask->DataSeg.cbSeg
+ ? pTestFile->cbFileMax - pTestTask->DataSeg.cbSeg
+ : pTestFile->cbFileCurr;
+ }
+ else
+ offMax = pTestFile->cbFileMax - pTestTask->DataSeg.cbSeg;
+
+ uint64_t offMin;
+
+ /*
+ * If we reached the maximum file size write in the whole file
+ * otherwise we will enforce the range for random offsets to let it grow
+ * more quickly.
+ */
+ if (pTestFile->cbFileCurr == pTestFile->cbFileMax)
+ offMin = 0;
+ else
+ offMin = RT_MIN(pTestFile->cbFileCurr, offMax);
+
+
+ pTestTask->off = RTRandU64Ex(offMin, offMax) & ~511;
+
+ /* Set new file size if required */
+ if ((uint64_t)pTestTask->off + pTestTask->DataSeg.cbSeg > pTestFile->cbFileCurr)
+ pTestFile->cbFileCurr = pTestTask->off + pTestTask->DataSeg.cbSeg;
+
+ AssertMsg(pTestFile->cbFileCurr <= pTestFile->cbFileMax,
+ ("Current file size (%llu) exceeds final size (%llu)\n",
+ pTestFile->cbFileCurr, pTestFile->cbFileMax));
+
+ /* Allocate data buffer. */
+ pTestTask->DataSeg.pvSeg = RTMemAlloc(pTestTask->DataSeg.cbSeg);
+ if (!pTestTask->DataSeg.pvSeg)
+ return VERR_NO_MEMORY;
+
+ /* Fill data into buffer. */
+ tstPDMACStressTestFileFillBuffer(pTestFile, pTestTask);
+
+ /* Engage */
+ rc = PDMR3AsyncCompletionEpWrite(pTestFile->hEndpoint, pTestTask->off,
+ &pTestTask->DataSeg, 1,
+ pTestTask->DataSeg.cbSeg,
+ pTestTask,
+ &pTestTask->hTask);
+
+ return rc;
+}
+
+static int tstPDMACStressTestFileRead(PPDMACTESTFILE pTestFile, PPDMACTESTFILETASK pTestTask)
+{
+ int rc = VINF_SUCCESS;
+
+ Assert(!pTestTask->fActive);
+
+ pTestTask->fActive = true;
+ pTestTask->fWrite = false;
+ pTestTask->DataSeg.cbSeg = RTRandU32Ex(1, RT_MIN(pTestFile->cbFileCurr, TASK_TRANSFER_SIZE_MAX));
+
+ AssertMsg(pTestFile->cbFileCurr >= pTestTask->DataSeg.cbSeg, ("Impossible\n"));
+ pTestTask->off = RTRandU64Ex(0, pTestFile->cbFileCurr - pTestTask->DataSeg.cbSeg);
+
+ /* Allocate data buffer. */
+ pTestTask->DataSeg.pvSeg = RTMemAlloc(pTestTask->DataSeg.cbSeg);
+ if (!pTestTask->DataSeg.pvSeg)
+ return VERR_NO_MEMORY;
+
+ /* Engage */
+ rc = PDMR3AsyncCompletionEpRead(pTestFile->hEndpoint, pTestTask->off,
+ &pTestTask->DataSeg, 1,
+ pTestTask->DataSeg.cbSeg,
+ pTestTask,
+ &pTestTask->hTask);
+
+ return rc;
+}
+
+/**
+ * Returns true with the given chance in percent.
+ *
+ * @returns true or false
+ * @param iPercentage The percentage of the chance to return true.
+ */
+static bool tstPDMACTestIsTrue(int iPercentage)
+{
+ int uRnd = RTRandU32Ex(0, 100);
+
+ return (uRnd <= iPercentage); /* This should be enough for our purpose */
+}
+
+static DECLCALLBACK(int) tstPDMACTestFileThread(PVM pVM, PPDMTHREAD pThread)
+{
+ PPDMACTESTFILE pTestFile = (PPDMACTESTFILE)pThread->pvUser;
+ int iWriteChance = 100; /* Chance to get a write task in percent. */
+ uint32_t cTasksStarted = 0;
+ int rc = VINF_SUCCESS;
+
+ if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
+ return VINF_SUCCESS;
+
+ while (pTestFile->fRunning)
+ {
+ unsigned iTaskCurr = 0;
+
+
+ /* Fill all tasks */
+ while ( (pTestFile->cTasksActiveCurr < pTestFile->cTasksActiveMax)
+ && (iTaskCurr < pTestFile->cTasksActiveMax))
+ {
+ PPDMACTESTFILETASK pTask = &pTestFile->paTasks[iTaskCurr];
+
+ if (!pTask->fActive)
+ {
+ /* Read or write task? */
+ bool fWrite = tstPDMACTestIsTrue(iWriteChance);
+
+ ASMAtomicIncU32(&pTestFile->cTasksActiveCurr);
+
+ if (fWrite)
+ rc = tstPDMACStressTestFileWrite(pTestFile, pTask);
+ else
+ rc = tstPDMACStressTestFileRead(pTestFile, pTask);
+
+ if (rc != VINF_AIO_TASK_PENDING)
+ tstPDMACStressTestFileTaskCompleted(pVM, pTask, pTestFile, rc);
+
+ cTasksStarted++;
+ }
+
+ iTaskCurr++;
+ }
+
+ /*
+ * Recalc write chance. The bigger the file the lower the chance to have a write.
+ * The minimum chance is 33 percent.
+ */
+ iWriteChance = 100 - (int)(((float)100.0 / pTestFile->cbFileMax) * (float)pTestFile->cbFileCurr);
+ iWriteChance = RT_MAX(33, iWriteChance);
+
+ /* Wait a random amount of time. (1ms - 100ms) */
+ RTThreadSleep(RTRandU32Ex(1, 100));
+ }
+
+ /* Wait for the rest to complete. */
+ while (pTestFile->cTasksActiveCurr)
+ RTThreadSleep(250);
+
+ RTPrintf("Thread exiting: processed %u tasks\n", cTasksStarted);
+ return rc;
+}
+
+static DECLCALLBACK(void) tstPDMACStressTestFileTaskCompleted(PVM pVM, void *pvUser, void *pvUser2, int rcReq)
+{
+ PPDMACTESTFILE pTestFile = (PPDMACTESTFILE)pvUser2;
+ PPDMACTESTFILETASK pTestTask = (PPDMACTESTFILETASK)pvUser;
+ NOREF(pVM); NOREF(rcReq);
+
+ if (pTestTask->fWrite)
+ {
+ /** @todo Do something sensible here. */
+ }
+ else
+ {
+ tstPDMACStressTestFileVerify(pTestFile, pTestTask); /* Will assert if it fails */
+ }
+
+ RTMemFree(pTestTask->DataSeg.pvSeg);
+ pTestTask->fActive = false;
+ AssertMsg(pTestFile->cTasksActiveCurr > 0, ("Trying to complete a non active task\n"));
+ ASMAtomicDecU32(&pTestFile->cTasksActiveCurr);
+}
+
/**
 * Sets up a test file creating the I/O thread.
 *
 * Picks random file/segment/task-count parameters, allocates the segment and
 * task arrays, creates the completion template, the backing file, the async
 * endpoint and finally the I/O thread.  On any failure everything set up so
 * far is unwound in reverse order.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the shared VM instance structure.
 * @param   pTestFile   Pointer to the uninitialized test file structure.
 * @param   iTestId     Unique test id.
 */
static int tstPDMACStressTestFileOpen(PVM pVM, PPDMACTESTFILE pTestFile, unsigned iTestId)
{
    int rc = VERR_NO_MEMORY;

    /* Size is a multiple of 512 */
    /* NOTE(review): on a 32-bit host ~(511UL) is a 32-bit mask that gets
       zero-extended to 64 bits, clipping cbFileMax below 4GiB -- verify
       FILE_SIZE_MAX stays below that there. */
    pTestFile->cbFileMax = RTRandU64Ex(FILE_SIZE_MIN, FILE_SIZE_MAX) & ~(511UL);
    pTestFile->cbFileCurr = 0;
    pTestFile->cbFileSegment = RTRandU32Ex(SEGMENT_SIZE_MIN, RT_MIN(pTestFile->cbFileMax, SEGMENT_SIZE_MAX)) & ~((size_t)511);

    Assert(pTestFile->cbFileMax >= pTestFile->cbFileSegment);

    /* Set up the segments array. */
    pTestFile->cSegments = pTestFile->cbFileMax / pTestFile->cbFileSegment;
    pTestFile->cSegments += ((pTestFile->cbFileMax % pTestFile->cbFileSegment) > 0) ? 1 : 0;

    pTestFile->paSegs = (PPDMACTESTFILESEG)RTMemAllocZ(pTestFile->cSegments * sizeof(PDMACTESTFILESEG));
    if (pTestFile->paSegs)
    {
        /* Init the segments */
        for (unsigned i = 0; i < pTestFile->cSegments; i++)
        {
            PPDMACTESTFILESEG pSeg = &pTestFile->paSegs[i];

            pSeg->off = (RTFOFF)i * pTestFile->cbFileSegment;
            pSeg->cbSegment = pTestFile->cbFileSegment;

            /* Let the buffer point to a random position in the test pattern. */
            /* NOTE(review): the RTRandU64Ex result is truncated to uint32_t;
               fine only while g_cbTestPattern fits in 32 bits -- confirm. */
            uint32_t offTestPattern = RTRandU64Ex(0, g_cbTestPattern - pSeg->cbSegment);

            pSeg->pbData = g_pbTestPattern + offTestPattern;
        }

        /* Init task array. */
        pTestFile->cTasksActiveMax = RTRandU32Ex(1, TASK_ACTIVE_MAX);
        pTestFile->paTasks = (PPDMACTESTFILETASK)RTMemAllocZ(pTestFile->cTasksActiveMax * sizeof(PDMACTESTFILETASK));
        if (pTestFile->paTasks)
        {
            /* Create the template */
            char szDesc[256];

            RTStrPrintf(szDesc, sizeof(szDesc), "Template-%d", iTestId);
            rc = PDMR3AsyncCompletionTemplateCreateInternal(pVM, &pTestFile->pTemplate, tstPDMACStressTestFileTaskCompleted,
                                                            pTestFile, szDesc);
            if (RT_SUCCESS(rc))
            {
                /* Open the endpoint now. Because async completion endpoints cannot create files we have to do it before. */
                char szFile[RTPATH_MAX];

                RTStrPrintf(szFile, sizeof(szFile), "tstPDMAsyncCompletionStress-%d.tmp", iTestId);

                RTFILE FileTmp;
                rc = RTFileOpen(&FileTmp, szFile, RTFILE_O_READWRITE | RTFILE_O_CREATE | RTFILE_O_DENY_NONE);
                if (RT_SUCCESS(rc))
                {
                    RTFileClose(FileTmp);

                    rc = PDMR3AsyncCompletionEpCreateForFile(&pTestFile->hEndpoint, szFile, 0, pTestFile->pTemplate);
                    if (RT_SUCCESS(rc))
                    {
                        char szThreadDesc[256];

                        pTestFile->fRunning = true;

                        /* Create the thread creating the I/O for the given file. */
                        RTStrPrintf(szThreadDesc, sizeof(szThreadDesc), "PDMACThread-%d", iTestId);
                        rc = PDMR3ThreadCreate(pVM, &pTestFile->hThread, pTestFile, tstPDMACTestFileThread,
                                               NULL, 0, RTTHREADTYPE_IO, szThreadDesc);
                        if (RT_SUCCESS(rc))
                        {
                            rc = PDMR3ThreadResume(pTestFile->hThread);
                            AssertRC(rc);

                            RTPrintf(TESTCASE ": Created test file %s cbFileMax=%llu cbFileSegment=%u cSegments=%u cTasksActiveMax=%u\n",
                                     szFile, pTestFile->cbFileMax, pTestFile->cbFileSegment, pTestFile->cSegments, pTestFile->cTasksActiveMax);
                            return VINF_SUCCESS;
                        }

                        /* Unwind: thread creation failed, close the endpoint again. */
                        PDMR3AsyncCompletionEpClose(pTestFile->hEndpoint);
                    }

                    RTFileDelete(szFile);
                }

                PDMR3AsyncCompletionTemplateDestroy(pTestFile->pTemplate);
            }

            RTMemFree(pTestFile->paTasks);
        }
        else
            rc = VERR_NO_MEMORY;

        RTMemFree(pTestFile->paSegs);
    }
    else
        rc = VERR_NO_MEMORY;

    RTPrintf(TESTCASE ": Opening test file with id %d failed rc=%Rrc\n", iTestId, rc);

    return rc;
}
+
/**
 * Closes a test file.
 *
 * Signals the I/O thread to stop, waits for it to terminate and then frees
 * the task/segment arrays, the endpoint and the completion template.
 *
 * @returns nothing.
 * @param   pTestFile    Pointer to the test file.
 */
static void tstPDMACStressTestFileClose(PPDMACTESTFILE pTestFile)
{
    int rcThread;
    int rc;

    RTPrintf("Terminating I/O thread, please wait...\n");

    /* Let the thread know that it should terminate. */
    pTestFile->fRunning = false;

    /* Wait for the thread to terminate. */
    rc = PDMR3ThreadDestroy(pTestFile->hThread, &rcThread);

    /* NOTE(review): rc is never checked and rcThread may be left unset if
       PDMR3ThreadDestroy fails -- confirm the API always writes it. */
    RTPrintf("Thread terminated with status code rc=%Rrc\n", rcThread);

    /* Free resources */
    RTMemFree(pTestFile->paTasks);
    RTMemFree(pTestFile->paSegs);
    PDMR3AsyncCompletionEpClose(pTestFile->hEndpoint);
    PDMR3AsyncCompletionTemplateDestroy(pTestFile->pTemplate);
}
+
+/**
+ * Inits the test pattern.
+ *
+ * @returns VBox status code.
+ */
+static int tstPDMACStressTestPatternInit(void)
+{
+ RTPrintf(TESTCASE ": Creating test pattern. Please wait...\n");
+ g_cbTestPattern = TEST_PATTERN_SIZE;
+ g_pbTestPattern = (uint8_t *)RTMemAlloc(g_cbTestPattern);
+ if (!g_pbTestPattern)
+ return VERR_NO_MEMORY;
+
+ RTRandBytes(g_pbTestPattern, g_cbTestPattern);
+ return VINF_SUCCESS;
+}
+
/**
 * Frees the global test pattern buffer allocated by
 * tstPDMACStressTestPatternInit().
 */
static void tstPDMACStressTestPatternDestroy(void)
{
    RTPrintf(TESTCASE ": Destroying test pattern\n");
    RTMemFree(g_pbTestPattern);
}
+
/**
 * Entry point.
 *
 * Creates a VM, opens NR_OPEN_ENDPOINTS stress-test files each with its own
 * I/O thread, then sleeps forever while the threads exercise the async
 * completion API (the process runs until killed or an assertion fires).
 *
 * @returns Number of errors encountered (0 on success).
 */
extern "C" DECLEXPORT(int) TrustedMain(int argc, char **argv, char **envp)
{
    RT_NOREF1(envp);
    int rcRet = 0; /* error count */

    RTR3InitExe(argc, &argv, RTR3INIT_FLAGS_SUPLIB);

    PVM pVM;
    PUVM pUVM;
    int rc = VMR3Create(1, NULL, NULL, NULL, NULL, NULL, &pVM, &pUVM);
    if (RT_SUCCESS(rc))
    {
        /*
         * Little hack to avoid the VM_ASSERT_EMT assertion.
         * Registers the calling thread as EMT0 in TLS so PDM calls work.
         */
        RTTlsSet(pVM->pUVM->vm.s.idxTLS, &pVM->pUVM->aCpus[0]);
        pVM->pUVM->aCpus[0].pUVM = pVM->pUVM;
        pVM->pUVM->aCpus[0].vm.s.NativeThreadEMT = RTThreadNativeSelf();

        rc = tstPDMACStressTestPatternInit();
        if (RT_SUCCESS(rc))
        {
            unsigned cFilesOpened = 0;

            /* Open the endpoints. */
            for (cFilesOpened = 0; cFilesOpened < NR_OPEN_ENDPOINTS; cFilesOpened++)
            {
                rc = tstPDMACStressTestFileOpen(pVM, &g_aTestFiles[cFilesOpened], cFilesOpened);
                if (RT_FAILURE(rc))
                    break;
            }

            if (RT_SUCCESS(rc))
            {
                /* Tests are running now. */
                RTPrintf(TESTCASE ": Successfully opened all files. Running tests forever now or until an error is hit :)\n");
                RTThreadSleep(RT_INDEFINITE_WAIT);
            }

            /* Close opened endpoints.  If the open loop was left early,
               cFilesOpened is exactly the number of successful opens. */
            for (unsigned i = 0; i < cFilesOpened; i++)
                tstPDMACStressTestFileClose(&g_aTestFiles[i]);

            tstPDMACStressTestPatternDestroy();
        }
        else
        {
            RTPrintf(TESTCASE ": failed to init test pattern!! rc=%Rrc\n", rc);
            rcRet++;
        }

        rc = VMR3Destroy(pUVM);
        AssertMsg(rc == VINF_SUCCESS, ("%s: Destroying VM failed rc=%Rrc!!\n", __FUNCTION__, rc));
    }
    else
    {
        RTPrintf(TESTCASE ": failed to create VM!! rc=%Rrc\n", rc);
        rcRet++;
    }

    return rcRet;
}
+
+
#if !defined(VBOX_WITH_HARDENING) || !defined(RT_OS_WINDOWS)
/**
 * Main entry point.
 *
 * Simply forwards to TrustedMain; compiled out when hardening is enabled on
 * Windows (presumably a hardened stub supplies the entry point then -- see
 * the build system).
 */
int main(int argc, char **argv, char **envp)
{
    return TrustedMain(argc, argv, envp);
}
#endif
+
diff --git a/src/VBox/VMM/testcase/tstSSM-2.cpp b/src/VBox/VMM/testcase/tstSSM-2.cpp
new file mode 100644
index 00000000..7b28e85d
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstSSM-2.cpp
@@ -0,0 +1,86 @@
+/* $Id: tstSSM-2.cpp $ */
+/** @file
+ * Saved State Manager Testcase: Extract the content of a saved state.
+ */
+
+/*
+ * Copyright (C) 2015-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <VBox/vmm/ssm.h>
+
+#include <VBox/log.h>
+#include <iprt/assert.h>
+#include <iprt/getopt.h>
+#include <iprt/errcore.h>
+#include <iprt/file.h>
+#include <iprt/path.h>
+#include <iprt/stream.h>
+#include <iprt/initterm.h>
+
+static RTEXITCODE extractUnit(const char *pszFilename, const char *pszUnitname, const char *pszOutputFilename)
+{
+ PSSMHANDLE pSSM;
+ int rc = SSMR3Open(pszFilename, 0, &pSSM);
+ RTEXITCODE rcExit = RTEXITCODE_FAILURE;
+ if (RT_SUCCESS(rc))
+ {
+ RTFILE hFile;
+ rc = RTFileOpen(&hFile, pszOutputFilename, RTFILE_O_DENY_NONE | RTFILE_O_WRITE | RTFILE_O_CREATE);
+ if (RT_SUCCESS(rc))
+ {
+ uint32_t version = 0;
+ rc = SSMR3Seek(pSSM, pszUnitname, 0 /* iInstance */, &version);
+ size_t cbUnit = 0;
+ if (RT_SUCCESS(rc))
+ {
+ for (;;)
+ {
+ uint8_t u8;
+ rc = SSMR3GetU8(pSSM, &u8);
+ if (RT_FAILURE(rc))
+ break;
+ size_t cbWritten;
+ rc = RTFileWrite(hFile, &u8, sizeof(u8), &cbWritten);
+ cbUnit++;
+ }
+ RTPrintf("Unit size %zu bytes, version %d\n", cbUnit, version);
+ }
+ else
+ RTPrintf("Cannot find unit '%s' (%Rrc)\n", pszUnitname, rc);
+ RTFileClose(hFile);
+ }
+ else
+ RTPrintf("Cannot open output file '%s' (%Rrc)\n", pszOutputFilename, rc);
+ SSMR3Close(pSSM);
+ }
+ else
+ RTPrintf("Cannot open SSM file '%s' (%Rrc)\n", pszFilename, rc);
+ return rcExit;
+}
+
+int main(int argc, char **argv)
+{
+ int rc = RTR3InitExe(argc, &argv, 0);
+ AssertRCReturn(rc, RTEXITCODE_INIT);
+
+ if (argc != 4)
+ {
+ RTPrintf("Usage: %s <SSM filename> <SSM unitname> <outfile>\n", RTPathFilename(argv[0]));
+ /* don't fail by default */
+ return RTEXITCODE_SUCCESS;
+ }
+ return extractUnit(argv[1], argv[2], argv[3]);
+}
diff --git a/src/VBox/VMM/testcase/tstSSM.cpp b/src/VBox/VMM/testcase/tstSSM.cpp
new file mode 100644
index 00000000..6f157aaf
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstSSM.cpp
@@ -0,0 +1,935 @@
+/* $Id: tstSSM.cpp $ */
+/** @file
+ * Saved State Manager Testcase.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <VBox/vmm/ssm.h>
+#include "VMInternal.h" /* createFakeVM */
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/stam.h>
+
+#include <VBox/log.h>
+#include <VBox/sup.h>
+#include <VBox/err.h>
+#include <VBox/param.h>
+#include <iprt/assert.h>
+#include <iprt/file.h>
+#include <iprt/initterm.h>
+#include <iprt/mem.h>
+#include <iprt/stream.h>
+#include <iprt/string.h>
+#include <iprt/time.h>
+#include <iprt/thread.h>
+#include <iprt/path.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+#define TSTSSM_BIG_CONFIG 1
+
+#ifdef TSTSSM_BIG_CONFIG
+# define TSTSSM_ITEM_SIZE (512*_1M)
+#else
+# define TSTSSM_ITEM_SIZE (5*_1M)
+#endif
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+const uint8_t gabPage[PAGE_SIZE] = {0};
+const char gachMem1[] = "sdfg\1asdfa\177hjkl;sdfghjkl;dfghjkl;dfghjkl;\0\0asdf;kjasdf;lkjasd;flkjasd;lfkjasd\0;lfk";
+#ifdef TSTSSM_BIG_CONFIG
+uint8_t gabBigMem[_1M];
+#else
+uint8_t gabBigMem[8*_1M];
+#endif
+
+
/** Initializes gabBigMem with some non zero stuff.
 *
 * Each 16-byte chunk gets an ASCII stamp derived from its own address
 * ("aaaa%08Xzzzz"), so the buffer content is position dependent and easy to
 * recognize in a hex dump.  A few page-sized zero runs are punched in at 1/4
 * and 3/4 of the buffer to exercise zero-page handling.
 */
void initBigMem(void)
{
#if 0
    /* Disabled alternative: fill with a rotating 32-bit counter. */
    uint32_t *puch = (uint32_t *)&gabBigMem[0];
    uint32_t *puchEnd = (uint32_t *)&gabBigMem[sizeof(gabBigMem)];
    uint32_t u32 = 0xdeadbeef;
    for (; puch < puchEnd; puch++)
    {
        *puch = u32;
        u32 += 19;
        u32 = (u32 << 1) | (u32 >> 31);
    }
#else
    uint8_t *pb = &gabBigMem[0];
    uint8_t *pbEnd = &gabBigMem[sizeof(gabBigMem)];
    for (; pb < pbEnd; pb += 16)
    {
        char szTmp[17];
        RTStrPrintf(szTmp, sizeof(szTmp), "aaaa%08Xzzzz", (uint32_t)(uintptr_t)pb);
        memcpy(pb, szTmp, 16);
    }

    /* add some zero pages */
    memset(&gabBigMem[sizeof(gabBigMem) / 4], 0, PAGE_SIZE * 4);
    memset(&gabBigMem[sizeof(gabBigMem) / 4 * 3], 0, PAGE_SIZE * 4);
#endif
}
+
+/**
+ * Execute state save operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM handle.
+ * @param pSSM SSM operation handle.
+ */
+DECLCALLBACK(int) Item01Save(PVM pVM, PSSMHANDLE pSSM)
+{
+ uint64_t u64Start = RTTimeNanoTS();
+ NOREF(pVM);
+
+ /*
+ * Test writing some memory block.
+ */
+ int rc = SSMR3PutMem(pSSM, gachMem1, sizeof(gachMem1));
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Item01: #1 - SSMR3PutMem -> %Rrc\n", rc);
+ return rc;
+ }
+
+ /*
+ * Test writing a zeroterminated string.
+ */
+ rc = SSMR3PutStrZ(pSSM, "String");
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Item01: #1 - SSMR3PutMem -> %Rrc\n", rc);
+ return rc;
+ }
+
+
+ /*
+ * Test the individual integer put functions to see that they all work.
+ * (Testcases are also known as "The Land of The Ugly Code"...)
+ */
+#define ITEM(suff,bits, val) \
+ rc = SSMR3Put##suff(pSSM, val); \
+ if (RT_FAILURE(rc)) \
+ { \
+ RTPrintf("Item01: #" #suff " - SSMR3Put" #suff "(," #val ") -> %Rrc\n", rc); \
+ return rc; \
+ }
+ /* copy & past with the load one! */
+ ITEM(U8, uint8_t, 0xff);
+ ITEM(U8, uint8_t, 0x0);
+ ITEM(U8, uint8_t, 1);
+ ITEM(U8, uint8_t, 42);
+ ITEM(U8, uint8_t, 230);
+ ITEM(S8, int8_t, -128);
+ ITEM(S8, int8_t, 127);
+ ITEM(S8, int8_t, 12);
+ ITEM(S8, int8_t, -76);
+ ITEM(U16, uint16_t, 0xffff);
+ ITEM(U16, uint16_t, 0x0);
+ ITEM(S16, int16_t, 32767);
+ ITEM(S16, int16_t, -32768);
+ ITEM(U32, uint32_t, 4294967295U);
+ ITEM(U32, uint32_t, 0);
+ ITEM(U32, uint32_t, 42);
+ ITEM(U32, uint32_t, 2342342344U);
+ ITEM(S32, int32_t, -2147483647-1);
+ ITEM(S32, int32_t, 2147483647);
+ ITEM(S32, int32_t, 42);
+ ITEM(S32, int32_t, 568459834);
+ ITEM(S32, int32_t, -58758999);
+ ITEM(U64, uint64_t, 18446744073709551615ULL);
+ ITEM(U64, uint64_t, 0);
+ ITEM(U64, uint64_t, 42);
+ ITEM(U64, uint64_t, 593023944758394234ULL);
+ ITEM(S64, int64_t, 9223372036854775807LL);
+ ITEM(S64, int64_t, -9223372036854775807LL - 1);
+ ITEM(S64, int64_t, 42);
+ ITEM(S64, int64_t, 21398723459873LL);
+ ITEM(S64, int64_t, -5848594593453453245LL);
+#undef ITEM
+
+ uint64_t u64Elapsed = RTTimeNanoTS() - u64Start;
+ RTPrintf("tstSSM: Saved 1st item in %'RI64 ns\n", u64Elapsed);
+ return 0;
+}
+
+/**
+ * Prepare state load operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM handle.
+ * @param pSSM SSM operation handle.
+ * @param uVersion The data layout version.
+ * @param uPass The data pass.
+ */
+DECLCALLBACK(int) Item01Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ NOREF(pVM); NOREF(uPass);
+ if (uVersion != 0)
+ {
+ RTPrintf("Item01: uVersion=%#x, expected 0\n", uVersion);
+ return VERR_GENERAL_FAILURE;
+ }
+
+ /*
+ * Load the memory block.
+ */
+ char achTmp[sizeof(gachMem1)];
+ int rc = SSMR3GetMem(pSSM, achTmp, sizeof(gachMem1));
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Item01: #1 - SSMR3GetMem -> %Rrc\n", rc);
+ return rc;
+ }
+
+ /*
+ * Load the string.
+ */
+ rc = SSMR3GetStrZ(pSSM, achTmp, sizeof(achTmp));
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Item01: #2 - SSMR3GetStrZ -> %Rrc\n", rc);
+ return rc;
+ }
+
+ /*
+ * Test the individual integer put functions to see that they all work.
+ * (Testcases are also known as "The Land of The Ugly Code"...)
+ */
+#define ITEM(suff, type, val) \
+ do { \
+ type var = {0}; \
+ rc = SSMR3Get##suff(pSSM, &var); \
+ if (RT_FAILURE(rc)) \
+ { \
+ RTPrintf("Item01: #" #suff " - SSMR3Get" #suff "(," #val ") -> %Rrc\n", rc); \
+ return rc; \
+ } \
+ if (var != val) \
+ { \
+ RTPrintf("Item01: #" #suff " - SSMR3Get" #suff "(," #val ") -> %d returned wrong value!\n", rc); \
+ return VERR_GENERAL_FAILURE; \
+ } \
+ } while (0)
+    /* Copy & paste with the save one -- keep both lists in sync! */
+ ITEM(U8, uint8_t, 0xff);
+ ITEM(U8, uint8_t, 0x0);
+ ITEM(U8, uint8_t, 1);
+ ITEM(U8, uint8_t, 42);
+ ITEM(U8, uint8_t, 230);
+ ITEM(S8, int8_t, -128);
+ ITEM(S8, int8_t, 127);
+ ITEM(S8, int8_t, 12);
+ ITEM(S8, int8_t, -76);
+ ITEM(U16, uint16_t, 0xffff);
+ ITEM(U16, uint16_t, 0x0);
+ ITEM(S16, int16_t, 32767);
+ ITEM(S16, int16_t, -32768);
+ ITEM(U32, uint32_t, 4294967295U);
+ ITEM(U32, uint32_t, 0);
+ ITEM(U32, uint32_t, 42);
+ ITEM(U32, uint32_t, 2342342344U);
+ ITEM(S32, int32_t, -2147483647-1);
+ ITEM(S32, int32_t, 2147483647);
+ ITEM(S32, int32_t, 42);
+ ITEM(S32, int32_t, 568459834);
+ ITEM(S32, int32_t, -58758999);
+ ITEM(U64, uint64_t, 18446744073709551615ULL);
+ ITEM(U64, uint64_t, 0);
+ ITEM(U64, uint64_t, 42);
+ ITEM(U64, uint64_t, 593023944758394234ULL);
+ ITEM(S64, int64_t, 9223372036854775807LL);
+ ITEM(S64, int64_t, -9223372036854775807LL - 1);
+ ITEM(S64, int64_t, 42);
+ ITEM(S64, int64_t, 21398723459873LL);
+ ITEM(S64, int64_t, -5848594593453453245LL);
+#undef ITEM
+
+ return 0;
+}
+
+
+/**
+ * Execute state save operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM handle.
+ * @param pSSM SSM operation handle.
+ */
+DECLCALLBACK(int) Item02Save(PVM pVM, PSSMHANDLE pSSM)
+{
+ NOREF(pVM);
+ uint64_t u64Start = RTTimeNanoTS();
+
+ /*
+ * Put the size.
+ */
+ uint32_t cb = sizeof(gabBigMem);
+ int rc = SSMR3PutU32(pSSM, cb);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Item02: PutU32 -> %Rrc\n", rc);
+ return rc;
+ }
+
+ /*
+ * Put 8MB of memory to the file in 3 chunks.
+ */
+ uint8_t *pbMem = &gabBigMem[0];
+ uint32_t cbChunk = cb / 47;
+ rc = SSMR3PutMem(pSSM, pbMem, cbChunk);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Item02: PutMem(,%p,%#x) -> %Rrc\n", pbMem, cbChunk, rc);
+ return rc;
+ }
+ cb -= cbChunk;
+ pbMem += cbChunk;
+
+ /* next piece. */
+ cbChunk *= 19;
+ rc = SSMR3PutMem(pSSM, pbMem, cbChunk);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Item02: PutMem(,%p,%#x) -> %Rrc\n", pbMem, cbChunk, rc);
+ return rc;
+ }
+ cb -= cbChunk;
+ pbMem += cbChunk;
+
+ /* last piece. */
+ cbChunk = cb;
+ rc = SSMR3PutMem(pSSM, pbMem, cbChunk);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Item02: PutMem(,%p,%#x) -> %Rrc\n", pbMem, cbChunk, rc);
+ return rc;
+ }
+
+ uint64_t u64Elapsed = RTTimeNanoTS() - u64Start;
+ RTPrintf("tstSSM: Saved 2nd item in %'RI64 ns\n", u64Elapsed);
+ return 0;
+}
+
+/**
+ * Prepare state load operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM handle.
+ * @param pSSM SSM operation handle.
+ * @param uVersion The data layout version.
+ * @param uPass The data pass.
+ */
+DECLCALLBACK(int) Item02Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ NOREF(pVM); NOREF(uPass);
+ if (uVersion != 0)
+ {
+ RTPrintf("Item02: uVersion=%#x, expected 0\n", uVersion);
+ return VERR_GENERAL_FAILURE;
+ }
+
+ /*
+ * Load the size.
+ */
+ uint32_t cb;
+ int rc = SSMR3GetU32(pSSM, &cb);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Item02: SSMR3GetU32 -> %Rrc\n", rc);
+ return rc;
+ }
+ if (cb != sizeof(gabBigMem))
+ {
+ RTPrintf("Item02: loaded size doesn't match the real thing. %#x != %#x\n", cb, sizeof(gabBigMem));
+ return VERR_GENERAL_FAILURE;
+ }
+
+ /*
+ * Load the memory chunk by chunk.
+ */
+ uint8_t *pbMem = &gabBigMem[0];
+ char achTmp[16383];
+ uint32_t cbChunk = sizeof(achTmp);
+ while (cb > 0)
+ {
+ cbChunk -= 7;
+ if (cbChunk < 64)
+ cbChunk = sizeof(achTmp) - (cbChunk % 47);
+ if (cbChunk > cb)
+ cbChunk = cb;
+ rc = SSMR3GetMem(pSSM, &achTmp[0], cbChunk);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Item02: SSMR3GetMem(,,%#x) -> %d offset %#x\n", cbChunk, rc, pbMem - &gabBigMem[0]);
+ return rc;
+ }
+ if (memcmp(achTmp, pbMem, cbChunk))
+ {
+ RTPrintf("Item02: compare failed. mem offset=%#x cbChunk=%#x\n", pbMem - &gabBigMem[0], cbChunk);
+ return VERR_GENERAL_FAILURE;
+ }
+
+ /* next */
+ pbMem += cbChunk;
+ cb -= cbChunk;
+ }
+
+ return 0;
+}
+
+
+/**
+ * Execute state save operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM handle.
+ * @param pSSM SSM operation handle.
+ */
+DECLCALLBACK(int) Item03Save(PVM pVM, PSSMHANDLE pSSM)
+{
+ NOREF(pVM);
+ uint64_t u64Start = RTTimeNanoTS();
+
+ /*
+ * Put the size.
+ */
+ uint32_t cb = TSTSSM_ITEM_SIZE;
+ int rc = SSMR3PutU32(pSSM, cb);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Item03: PutU32 -> %Rrc\n", rc);
+ return rc;
+ }
+
+ /*
+ * Put 512 MB page by page.
+ */
+ const uint8_t *pu8Org = &gabBigMem[0];
+ while (cb > 0)
+ {
+ rc = SSMR3PutMem(pSSM, pu8Org, PAGE_SIZE);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Item03: PutMem(,%p,%#x) -> %Rrc\n", pu8Org, PAGE_SIZE, rc);
+ return rc;
+ }
+
+ /* next */
+ cb -= PAGE_SIZE;
+ pu8Org += PAGE_SIZE;
+ if (pu8Org >= &gabBigMem[sizeof(gabBigMem)])
+ pu8Org = &gabBigMem[0];
+ }
+
+ uint64_t u64Elapsed = RTTimeNanoTS() - u64Start;
+ RTPrintf("tstSSM: Saved 3rd item in %'RI64 ns\n", u64Elapsed);
+ return 0;
+}
+
+/**
+ * Prepare state load operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM handle.
+ * @param pSSM SSM operation handle.
+ * @param uVersion The data layout version.
+ * @param uPass The data pass.
+ */
+DECLCALLBACK(int) Item03Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ NOREF(pVM); NOREF(uPass);
+ if (uVersion != 123)
+ {
+ RTPrintf("Item03: uVersion=%#x, expected 123\n", uVersion);
+ return VERR_GENERAL_FAILURE;
+ }
+
+ /*
+ * Load the size.
+ */
+ uint32_t cb;
+ int rc = SSMR3GetU32(pSSM, &cb);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Item03: SSMR3GetU32 -> %Rrc\n", rc);
+ return rc;
+ }
+ if (cb != TSTSSM_ITEM_SIZE)
+ {
+ RTPrintf("Item03: loaded size doesn't match the real thing. %#x != %#x\n", cb, TSTSSM_ITEM_SIZE);
+ return VERR_GENERAL_FAILURE;
+ }
+
+ /*
+ * Load the memory page by page.
+ */
+ const uint8_t *pu8Org = &gabBigMem[0];
+ while (cb > 0)
+ {
+ char achPage[PAGE_SIZE];
+ rc = SSMR3GetMem(pSSM, &achPage[0], PAGE_SIZE);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Item03: SSMR3GetMem(,,%#x) -> %Rrc offset %#x\n", PAGE_SIZE, rc, TSTSSM_ITEM_SIZE - cb);
+ return rc;
+ }
+ if (memcmp(achPage, pu8Org, PAGE_SIZE))
+ {
+ RTPrintf("Item03: compare failed. mem offset=%#x\n", TSTSSM_ITEM_SIZE - cb);
+ return VERR_GENERAL_FAILURE;
+ }
+
+ /* next */
+ cb -= PAGE_SIZE;
+ pu8Org += PAGE_SIZE;
+ if (pu8Org >= &gabBigMem[sizeof(gabBigMem)])
+ pu8Org = &gabBigMem[0];
+ }
+
+ return 0;
+}
+
+
+/**
+ * Execute state save operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM handle.
+ * @param pSSM SSM operation handle.
+ */
+DECLCALLBACK(int) Item04Save(PVM pVM, PSSMHANDLE pSSM)
+{
+ NOREF(pVM);
+ uint64_t u64Start = RTTimeNanoTS();
+
+ /*
+ * Put the size.
+ */
+ uint32_t cb = 512*_1M;
+ int rc = SSMR3PutU32(pSSM, cb);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Item04: PutU32 -> %Rrc\n", rc);
+ return rc;
+ }
+
+ /*
+ * Put 512 MB page by page.
+ */
+ while (cb > 0)
+ {
+ rc = SSMR3PutMem(pSSM, gabPage, PAGE_SIZE);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Item04: PutMem(,%p,%#x) -> %Rrc\n", gabPage, PAGE_SIZE, rc);
+ return rc;
+ }
+
+ /* next */
+ cb -= PAGE_SIZE;
+ }
+
+ uint64_t u64Elapsed = RTTimeNanoTS() - u64Start;
+ RTPrintf("tstSSM: Saved 4th item in %'RI64 ns\n", u64Elapsed);
+ return 0;
+}
+
+/**
+ * Prepare state load operation.
+ *
+ * @returns VBox status code.
+ * @param pVM The cross context VM handle.
+ * @param pSSM SSM operation handle.
+ * @param uVersion The data layout version.
+ * @param uPass The data pass.
+ */
+DECLCALLBACK(int) Item04Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
+{
+ NOREF(pVM); NOREF(uPass);
+ if (uVersion != 42)
+ {
+ RTPrintf("Item04: uVersion=%#x, expected 42\n", uVersion);
+ return VERR_GENERAL_FAILURE;
+ }
+
+ /*
+ * Load the size.
+ */
+ uint32_t cb;
+ int rc = SSMR3GetU32(pSSM, &cb);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Item04: SSMR3GetU32 -> %Rrc\n", rc);
+ return rc;
+ }
+ if (cb != 512*_1M)
+ {
+ RTPrintf("Item04: loaded size doesn't match the real thing. %#x != %#x\n", cb, 512*_1M);
+ return VERR_GENERAL_FAILURE;
+ }
+
+ /*
+ * Load the memory page by page.
+ */
+ while (cb > 0)
+ {
+ char achPage[PAGE_SIZE];
+ rc = SSMR3GetMem(pSSM, &achPage[0], PAGE_SIZE);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Item04: SSMR3GetMem(,,%#x) -> %Rrc offset %#x\n", PAGE_SIZE, rc, 512*_1M - cb);
+ return rc;
+ }
+ if (memcmp(achPage, gabPage, PAGE_SIZE))
+ {
+ RTPrintf("Item04: compare failed. mem offset=%#x\n", 512*_1M - cb);
+ return VERR_GENERAL_FAILURE;
+ }
+
+ /* next */
+ cb -= PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+
+/**
+ * Creates a mockup VM structure for testing SSM.
+ *
+ * @returns 0 on success, 1 on failure.
+ * @param ppVM Where to store Pointer to the VM.
+ *
+ * @todo Move this to VMM/VM since it's stuff done by several testcases.
+ */
+static int createFakeVM(PVM *ppVM)
+{
+ /*
+ * Allocate and init the UVM structure.
+ */
+ PUVM pUVM = (PUVM)RTMemPageAllocZ(sizeof(*pUVM));
+ AssertReturn(pUVM, 1);
+ pUVM->u32Magic = UVM_MAGIC;
+ pUVM->vm.s.idxTLS = RTTlsAlloc();
+ int rc = RTTlsSet(pUVM->vm.s.idxTLS, &pUVM->aCpus[0]);
+ if (RT_SUCCESS(rc))
+ {
+ pUVM->aCpus[0].pUVM = pUVM;
+ pUVM->aCpus[0].vm.s.NativeThreadEMT = RTThreadNativeSelf();
+
+ rc = STAMR3InitUVM(pUVM);
+ if (RT_SUCCESS(rc))
+ {
+ rc = MMR3InitUVM(pUVM);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Allocate and init the VM structure.
+ */
+ PVM pVM = (PVM)RTMemPageAllocZ(sizeof(VM) + sizeof(VMCPU));
+ rc = pVM ? VINF_SUCCESS : VERR_NO_PAGE_MEMORY;
+ if (RT_SUCCESS(rc))
+ {
+ pVM->enmVMState = VMSTATE_CREATED;
+ pVM->pVMR3 = pVM;
+ pVM->pUVM = pUVM;
+ pVM->cCpus = 1;
+
+ PVMCPU pVCpu = (PVMCPU)(pVM + 1);
+ pVCpu->pVMR3 = pVM;
+ pVCpu->hNativeThread = RTThreadNativeSelf();
+ pVM->apCpusR3[0] = pVCpu;
+
+ pUVM->pVM = pVM;
+ *ppVM = pVM;
+ return 0;
+ }
+
+ RTPrintf("Fatal error: failed to allocated pages for the VM structure, rc=%Rrc\n", rc);
+ }
+ else
+ RTPrintf("Fatal error: MMR3InitUVM failed, rc=%Rrc\n", rc);
+ }
+ else
+ RTPrintf("Fatal error: SSMR3InitUVM failed, rc=%Rrc\n", rc);
+ }
+ else
+ RTPrintf("Fatal error: RTTlsSet failed, rc=%Rrc\n", rc);
+
+ *ppVM = NULL;
+ return 1;
+}
+
+
/**
 * Destroy the VM structure.
 *
 * @param   pVM     Pointer to the VM.
 *
 * @todo Move this to VMM/VM since it's stuff done by several testcases.
 */
static void destroyFakeVM(PVM pVM)
{
    /* NOTE(review): tears down STAM before MM, i.e. the same order as
       createFakeVM initialized them rather than the reverse -- appears
       harmless for this testcase, but verify the subsystems are independent. */
    STAMR3TermUVM(pVM->pUVM);
    MMR3TermUVM(pVM->pUVM);
}
+
+
+/**
+ * Entry point.
+ */
+int main(int argc, char **argv)
+{
+ /*
+ * Init runtime and static data.
+ */
+ int rc = RTR3InitExe(argc, &argv, 0);
+ AssertRCReturn(rc, RTEXITCODE_INIT);
+ RTPrintf("tstSSM: TESTING...\n");
+ initBigMem();
+ const char *pszFilename = "SSMTestSave#1";
+
+ /*
+ * Create an fake VM structure and init SSM.
+ */
+ PVM pVM;
+ if (createFakeVM(&pVM))
+ return 1;
+
+ /*
+ * Register a few callbacks.
+ */
+ rc = SSMR3RegisterInternal(pVM, "SSM Testcase Data Item no.1 (all types)", 1, 0, 256,
+ NULL, NULL, NULL,
+ NULL, Item01Save, NULL,
+ NULL, Item01Load, NULL);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("SSMR3Register #1 -> %Rrc\n", rc);
+ return 1;
+ }
+
+ rc = SSMR3RegisterInternal(pVM, "SSM Testcase Data Item no.2 (rand mem)", 2, 0, _1M * 8,
+ NULL, NULL, NULL,
+ NULL, Item02Save, NULL,
+ NULL, Item02Load, NULL);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("SSMR3Register #2 -> %Rrc\n", rc);
+ return 1;
+ }
+
+ rc = SSMR3RegisterInternal(pVM, "SSM Testcase Data Item no.3 (big mem)", 0, 123, 512*_1M,
+ NULL, NULL, NULL,
+ NULL, Item03Save, NULL,
+ NULL, Item03Load, NULL);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("SSMR3Register #3 -> %Rrc\n", rc);
+ return 1;
+ }
+
+ rc = SSMR3RegisterInternal(pVM, "SSM Testcase Data Item no.4 (big zero mem)", 0, 42, 512*_1M,
+ NULL, NULL, NULL,
+ NULL, Item04Save, NULL,
+ NULL, Item04Load, NULL);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("SSMR3Register #4 -> %Rrc\n", rc);
+ return 1;
+ }
+
+ /*
+ * Attempt a save.
+ */
+ uint64_t u64Start = RTTimeNanoTS();
+ rc = SSMR3Save(pVM, pszFilename, NULL, NULL, SSMAFTER_DESTROY, NULL, NULL);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("SSMR3Save #1 -> %Rrc\n", rc);
+ return 1;
+ }
+ uint64_t u64Elapsed = RTTimeNanoTS() - u64Start;
+ RTPrintf("tstSSM: Saved in %'RI64 ns\n", u64Elapsed);
+
+ RTFSOBJINFO Info;
+ rc = RTPathQueryInfo(pszFilename, &Info, RTFSOBJATTRADD_NOTHING);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("tstSSM: failed to query file size: %Rrc\n", rc);
+ return 1;
+ }
+ RTPrintf("tstSSM: file size %'RI64 bytes\n", Info.cbObject);
+
+ /*
+ * Attempt a load.
+ */
+ u64Start = RTTimeNanoTS();
+ rc = SSMR3Load(pVM, pszFilename, NULL /*pStreamOps*/, NULL /*pStreamOpsUser*/,
+ SSMAFTER_RESUME, NULL /*pfnProgress*/, NULL /*pvProgressUser*/);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("SSMR3Load #1 -> %Rrc\n", rc);
+ return 1;
+ }
+ u64Elapsed = RTTimeNanoTS() - u64Start;
+ RTPrintf("tstSSM: Loaded in %'RI64 ns\n", u64Elapsed);
+
+ /*
+ * Validate it.
+ */
+ u64Start = RTTimeNanoTS();
+ rc = SSMR3ValidateFile(pszFilename, false /* fChecksumIt*/ );
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("SSMR3ValidateFile #1 -> %Rrc\n", rc);
+ return 1;
+ }
+ u64Elapsed = RTTimeNanoTS() - u64Start;
+ RTPrintf("tstSSM: Validated without checksumming in %'RI64 ns\n", u64Elapsed);
+
+ u64Start = RTTimeNanoTS();
+ rc = SSMR3ValidateFile(pszFilename, true /* fChecksumIt */);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("SSMR3ValidateFile #1 -> %Rrc\n", rc);
+ return 1;
+ }
+ u64Elapsed = RTTimeNanoTS() - u64Start;
+ RTPrintf("tstSSM: Validated and checksummed in %'RI64 ns\n", u64Elapsed);
+
+ /*
+ * Open it and read.
+ */
+ u64Start = RTTimeNanoTS();
+ PSSMHANDLE pSSM;
+ rc = SSMR3Open(pszFilename, 0, &pSSM);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("SSMR3Open #1 -> %Rrc\n", rc);
+ return 1;
+ }
+ u64Elapsed = RTTimeNanoTS() - u64Start;
+ RTPrintf("tstSSM: Opened in %'RI64 ns\n", u64Elapsed);
+
+ /* negative */
+ u64Start = RTTimeNanoTS();
+ rc = SSMR3Seek(pSSM, "some unit that doesn't exist", 0, NULL);
+ if (rc != VERR_SSM_UNIT_NOT_FOUND)
+ {
+ RTPrintf("SSMR3Seek #1 negative -> %Rrc\n", rc);
+ return 1;
+ }
+ u64Elapsed = RTTimeNanoTS() - u64Start;
+ RTPrintf("tstSSM: Failed seek in %'RI64 ns\n", u64Elapsed);
+
+ /* another negative, now only the instance number isn't matching. */
+ rc = SSMR3Seek(pSSM, "SSM Testcase Data Item no.2 (rand mem)", 0, NULL);
+ if (rc != VERR_SSM_UNIT_NOT_FOUND)
+ {
+ RTPrintf("SSMR3Seek #1 unit 2 -> %Rrc\n", rc);
+ return 1;
+ }
+
+ /* 2nd unit */
+ rc = SSMR3Seek(pSSM, "SSM Testcase Data Item no.2 (rand mem)", 2, NULL);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("SSMR3Seek #1 unit 2 -> %Rrc [2]\n", rc);
+ return 1;
+ }
+ uint32_t uVersion = 0xbadc0ded;
+ rc = SSMR3Seek(pSSM, "SSM Testcase Data Item no.2 (rand mem)", 2, &uVersion);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("SSMR3Seek #1 unit 2 -> %Rrc [3]\n", rc);
+ return 1;
+ }
+ u64Start = RTTimeNanoTS();
+ rc = Item02Load(NULL, pSSM, uVersion, SSM_PASS_FINAL);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Item02Load #1 -> %Rrc\n", rc);
+ return 1;
+ }
+ u64Elapsed = RTTimeNanoTS() - u64Start;
+ RTPrintf("tstSSM: Loaded 2nd item in %'RI64 ns\n", u64Elapsed);
+
+ /* 1st unit */
+ uVersion = 0xbadc0ded;
+ rc = SSMR3Seek(pSSM, "SSM Testcase Data Item no.1 (all types)", 1, &uVersion);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("SSMR3Seek #1 unit 1 -> %Rrc\n", rc);
+ return 1;
+ }
+ u64Start = RTTimeNanoTS();
+ rc = Item01Load(NULL, pSSM, uVersion, SSM_PASS_FINAL);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Item01Load #1 -> %Rrc\n", rc);
+ return 1;
+ }
+ u64Elapsed = RTTimeNanoTS() - u64Start;
+ RTPrintf("tstSSM: Loaded 1st item in %'RI64 ns\n", u64Elapsed);
+
+    /* 3rd unit */
+ uVersion = 0xbadc0ded;
+ rc = SSMR3Seek(pSSM, "SSM Testcase Data Item no.3 (big mem)", 0, &uVersion);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("SSMR3Seek #3 unit 1 -> %Rrc\n", rc);
+ return 1;
+ }
+ u64Start = RTTimeNanoTS();
+ rc = Item03Load(NULL, pSSM, uVersion, SSM_PASS_FINAL);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("Item01Load #3 -> %Rrc\n", rc);
+ return 1;
+ }
+ u64Elapsed = RTTimeNanoTS() - u64Start;
+ RTPrintf("tstSSM: Loaded 3rd item in %'RI64 ns\n", u64Elapsed);
+
+ /* close */
+ rc = SSMR3Close(pSSM);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf("SSMR3Close #1 -> %Rrc\n", rc);
+ return 1;
+ }
+
+ destroyFakeVM(pVM);
+
+ /* delete */
+ RTFileDelete(pszFilename);
+
+ RTPrintf("tstSSM: SUCCESS\n");
+ return 0;
+}
+
diff --git a/src/VBox/VMM/testcase/tstVMM-HM.cpp b/src/VBox/VMM/testcase/tstVMM-HM.cpp
new file mode 100644
index 00000000..2d96f815
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstVMM-HM.cpp
@@ -0,0 +1,121 @@
+/* $Id: tstVMM-HM.cpp $ */
+/** @file
+ * VMM Testcase.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/cpum.h>
+#include <iprt/errcore.h>
+#include <VBox/log.h>
+#include <iprt/assert.h>
+#include <iprt/initterm.h>
+#include <iprt/semaphore.h>
+#include <iprt/stream.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+#define TESTCASE "tstVMM-Hm"
+
+VMMR3DECL(int) VMMDoHmTest(PVM pVM);
+
+
+#if 0
+static DECLCALLBACK(int) tstVmmHmConfigConstructor(PUVM pUVM, PVM pVM, void *pvUser)
+{
+ RT_NOREF2(pUVM, pvUser);
+
+ /*
+ * Get root node first.
+ * This is the only node in the tree.
+ */
+ PCFGMNODE pRoot = CFGMR3GetRoot(pVM);
+ int rc = CFGMR3InsertInteger(pRoot, "RamSize", 32*1024*1024);
+ AssertRC(rc);
+
+ /* rc = CFGMR3InsertInteger(pRoot, "EnableNestedPaging", false);
+ AssertRC(rc); */
+
+ PCFGMNODE pHWVirtExt;
+ rc = CFGMR3InsertNode(pRoot, "HWVirtExt", &pHWVirtExt);
+ AssertRC(rc);
+ rc = CFGMR3InsertInteger(pHWVirtExt, "Enabled", 1);
+ AssertRC(rc);
+
+ return VINF_SUCCESS;
+}
+#endif
+
+int main(int argc, char **argv)
+{
+ RTR3InitExe(argc, &argv, RTR3INIT_FLAGS_SUPLIB);
+
+ /*
+ * Doesn't work and I'm sick of rebooting the machine to try figure out
+ * what the heck is going wrong. (Linux sucks at this)
+ */
+ RTPrintf(TESTCASE ": This testcase hits a bunch of breakpoint assertions which\n"
+ TESTCASE ": causes kernel panics on linux regardless of what\n"
+ TESTCASE ": RTAssertDoBreakpoint returns. Only checked AMD-V on linux.\n");
+#if 1
+ /** @todo Make tstVMM-Hm to cause kernel panics. */
+ return 1;
+#else
+ int rcRet = 0; /* error count. */
+
+ /*
+ * Create empty VM.
+ */
+ RTPrintf(TESTCASE ": Initializing...\n");
+ PVM pVM;
+ PUVM pUVM;
+ int rc = VMR3Create(1, NULL, NULL, NULL, tstVmmHmConfigConstructor, NULL, &pVM, &pUVM);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Do testing.
+ */
+ RTPrintf(TESTCASE ": Testing...\n");
+ rc = VMR3ReqCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)VMMDoHmTest, 1, pVM);
+ AssertRC(rc);
+
+ STAMR3Dump(pUVM, "*");
+
+ /*
+ * Cleanup.
+ */
+ rc = VMR3Destroy(pUVM);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf(TESTCASE ": error: failed to destroy vm! rc=%d\n", rc);
+ rcRet++;
+ }
+ VMR3ReleaseUVM(pUVM);
+ }
+ else
+ {
+ RTPrintf(TESTCASE ": fatal error: failed to create vm! rc=%d\n", rc);
+ rcRet++;
+ }
+
+ return rcRet;
+#endif
+}
diff --git a/src/VBox/VMM/testcase/tstVMMFork.cpp b/src/VBox/VMM/testcase/tstVMMFork.cpp
new file mode 100644
index 00000000..1473e820
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstVMMFork.cpp
@@ -0,0 +1,170 @@
+/* $Id: tstVMMFork.cpp $ */
+/** @file
+ * VMM Fork Test.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/vmm.h>
+#include <iprt/errcore.h>
+#include <VBox/log.h>
+#include <iprt/assert.h>
+#include <iprt/initterm.h>
+#include <iprt/stream.h>
+
+#include <errno.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+#define TESTCASE "tstVMMFork"
+#define AUTO_TEST_ARGS 1
+
+VMMR3DECL(int) VMMDoTest(PVM pVM);
+
+
+int main(int argc, char* argv[])
+{
+ int rcErrors = 0;
+
+ /*
+ * Initialize the runtime.
+ */
+ RTR3InitExe(argc, &argv, RTR3INIT_FLAGS_SUPLIB);
+
+#ifndef AUTO_TEST_ARGS
+ if (argc < 2)
+ {
+ RTPrintf("syntax: %s command [args]\n"
+ "\n"
+ "command Command to run under child process in fork.\n"
+ "[args] Arguments to command.\n", argv[0]);
+ return 1;
+ }
+#endif
+
+ /*
+ * Create empty VM.
+ */
+ RTPrintf(TESTCASE ": Initializing...\n");
+ PVM pVM;
+ PUVM pUVM;
+ int rc = VMR3Create(1, NULL, NULL, NULL, NULL, NULL, &pVM, &pUVM);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Do testing.
+ */
+ int iCowTester = 0;
+ char cCowTester = 'a';
+
+#ifndef AUTO_TEST_ARGS
+ int cArgs = argc - 1;
+ char **ppszArgs = &argv[1];
+#else
+ int cArgs = 2;
+ char *ppszArgs[3];
+ ppszArgs[0] = (char *)"/bin/sleep";
+ ppszArgs[1] = (char *)"3";
+ ppszArgs[2] = NULL;
+#endif
+
+ RTPrintf(TESTCASE ": forking current process...\n");
+ pid_t pid = fork();
+ if (pid < 0)
+ {
+ /* Bad. fork() failed! */
+ RTPrintf(TESTCASE ": error: fork() failed.\n");
+ rcErrors++;
+ }
+ else if (pid == 0)
+ {
+ /*
+ * The child process.
+ * Write to some local variables to trigger copy-on-write if it's used.
+ */
+ RTPrintf(TESTCASE ": running child process...\n");
+ RTPrintf(TESTCASE ": writing local variables...\n");
+ iCowTester = 2;
+ cCowTester = 'z';
+
+ RTPrintf(TESTCASE ": calling execv() with command-line:\n");
+ for (int i = 0; i < cArgs; i++)
+ RTPrintf(TESTCASE ": ppszArgs[%d]=%s\n", i, ppszArgs[i]);
+ execv(ppszArgs[0], ppszArgs);
+ RTPrintf(TESTCASE ": error: execv() returned to caller. errno=%d.\n", errno);
+ _exit(-1);
+ }
+ else
+ {
+ /*
+ * The parent process.
+ * Wait for child & run VMM test to ensure things are fine.
+ */
+ int result;
+ while (waitpid(pid, &result, 0) < 0)
+ ;
+ if (!WIFEXITED(result) || WEXITSTATUS(result) != 0)
+ {
+ RTPrintf(TESTCASE ": error: failed to run child process. errno=%d\n", errno);
+ rcErrors++;
+ }
+
+ if (rcErrors == 0)
+ {
+ RTPrintf(TESTCASE ": fork() returned fine.\n");
+ RTPrintf(TESTCASE ": testing VM after fork.\n");
+ VMR3ReqCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)VMMDoTest, 1, pVM);
+
+ STAMR3Dump(pUVM, "*");
+ }
+ }
+
+ if (rcErrors > 0)
+ RTPrintf(TESTCASE ": error: %d error(s) during fork(). Cannot proceed to test the VM.\n", rcErrors);
+ else
+ RTPrintf(TESTCASE ": fork() and VM test, SUCCESS.\n");
+
+ /*
+ * Cleanup.
+ */
+ rc = VMR3PowerOff(pUVM);
+ if (!RT_SUCCESS(rc))
+ {
+ RTPrintf(TESTCASE ": error: failed to power off vm! rc=%Rrc\n", rc);
+ rcErrors++;
+ }
+ rc = VMR3Destroy(pUVM);
+ if (!RT_SUCCESS(rc))
+ {
+ RTPrintf(TESTCASE ": error: failed to destroy vm! rc=%Rrc\n", rc);
+ rcErrors++;
+ }
+ VMR3ReleaseUVM(pUVM);
+ }
+ else
+ {
+ RTPrintf(TESTCASE ": fatal error: failed to create vm! rc=%Rrc\n", rc);
+ rcErrors++;
+ }
+
+ return rcErrors;
+}
diff --git a/src/VBox/VMM/testcase/tstVMMR0CallHost-1.cpp b/src/VBox/VMM/testcase/tstVMMR0CallHost-1.cpp
new file mode 100644
index 00000000..ea871db4
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstVMMR0CallHost-1.cpp
@@ -0,0 +1,181 @@
+/* $Id: tstVMMR0CallHost-1.cpp $ */
+/** @file
+ * Testcase for the VMMR0JMPBUF operations.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <iprt/errcore.h>
+#include <VBox/param.h>
+#include <iprt/alloca.h>
+#include <iprt/initterm.h>
+#include <iprt/rand.h>
+#include <iprt/string.h>
+#include <iprt/stream.h>
+#include <iprt/test.h>
+
+#define IN_VMM_R0
+#define IN_RING0 /* pretend we're in Ring-0 to get the prototypes. */
+#include <VBox/vmm/vmm.h>
+#include "VMMInternal.h"
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+#if !defined(VMM_R0_SWITCH_STACK) && !defined(VMM_R0_NO_SWITCH_STACK)
+# error "VMM_R0_SWITCH_STACK or VMM_R0_NO_SWITCH_STACK has to be defined."
+#endif
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** The jump buffer. */
+static VMMR0JMPBUF g_Jmp;
+/** The number of jumps we've done. */
+static unsigned volatile g_cJmps;
+/** Number of bytes allocated last time we called foo(). */
+static size_t volatile g_cbFoo;
+/** Number of bytes used last time we called foo(). */
+static intptr_t volatile g_cbFooUsed;
+
+
+int foo(int i, int iZero, int iMinusOne)
+{
+ NOREF(iZero);
+
+ /* allocate a buffer which we fill up to the end. */
+ size_t cb = (i % 1555) + 32;
+ g_cbFoo = cb;
+ char *pv = (char *)alloca(cb);
+ RTStrPrintf(pv, cb, "i=%d%*s\n", i, cb, "");
+#ifdef VMM_R0_SWITCH_STACK
+ g_cbFooUsed = VMM_STACK_SIZE - ((uintptr_t)pv - (uintptr_t)g_Jmp.pvSavedStack);
+ RTTESTI_CHECK_MSG_RET(g_cbFooUsed < (intptr_t)VMM_STACK_SIZE - 128, ("%#x - (%p - %p) -> %#x; cb=%#x i=%d\n", VMM_STACK_SIZE, pv, g_Jmp.pvSavedStack, g_cbFooUsed, cb, i), -15);
+#elif defined(RT_ARCH_AMD64)
+ g_cbFooUsed = (uintptr_t)g_Jmp.rsp - (uintptr_t)pv;
+ RTTESTI_CHECK_MSG_RET(g_cbFooUsed < VMM_STACK_SIZE - 128, ("%p - %p -> %#x; cb=%#x i=%d\n", g_Jmp.rsp, pv, g_cbFooUsed, cb, i), -15);
+#elif defined(RT_ARCH_X86)
+ g_cbFooUsed = (uintptr_t)g_Jmp.esp - (uintptr_t)pv;
+ RTTESTI_CHECK_MSG_RET(g_cbFooUsed < (intptr_t)VMM_STACK_SIZE - 128, ("%p - %p -> %#x; cb=%#x i=%d\n", g_Jmp.esp, pv, g_cbFooUsed, cb, i), -15);
+#endif
+
+ /* Twice in a row, every 7th time. */
+ if ((i % 7) <= 1)
+ {
+ g_cJmps++;
+ int rc = vmmR0CallRing3LongJmp(&g_Jmp, 42);
+ if (!rc)
+ return i + 10000;
+ return -1;
+ }
+ NOREF(iMinusOne);
+ return i;
+}
+
+
+DECLCALLBACK(int) tst2(intptr_t i, intptr_t i2)
+{
+ RTTESTI_CHECK_MSG_RET(i >= 0 && i <= 8192, ("i=%d is out of range [0..8192]\n", i), 1);
+ RTTESTI_CHECK_MSG_RET(i2 == 0, ("i2=%d is out of range [0]\n", i2), 1);
+ int iExpect = (i % 7) <= 1 ? i + 10000 : i;
+ int rc = foo(i, 0, -1);
+ RTTESTI_CHECK_MSG_RET(rc == iExpect, ("i=%d rc=%d expected=%d\n", i, rc, iExpect), 1);
+ return 0;
+}
+
+
+DECLCALLBACK(DECL_NO_INLINE(RT_NOTHING, int)) stackRandom(PVMMR0JMPBUF pJmpBuf, PFNVMMR0SETJMP pfn, PVM pVM, PVMCPU pVCpu)
+{
+#ifdef RT_ARCH_AMD64
+ uint32_t cbRand = RTRandU32Ex(1, 96);
+#else
+ uint32_t cbRand = 1;
+#endif
+ uint8_t volatile *pabFuzz = (uint8_t volatile *)alloca(cbRand);
+ memset((void *)pabFuzz, 0xfa, cbRand);
+ int rc = vmmR0CallRing3SetJmp(pJmpBuf, pfn, pVM, pVCpu);
+ memset((void *)pabFuzz, 0xaf, cbRand);
+ return rc;
+}
+
+
+void tst(int iFrom, int iTo, int iInc)
+{
+#ifdef VMM_R0_SWITCH_STACK
+ int const cIterations = iFrom > iTo ? iFrom - iTo : iTo - iFrom;
+ void *pvPrev = alloca(1);
+#endif
+
+ RTR0PTR R0PtrSaved = g_Jmp.pvSavedStack;
+ RT_ZERO(g_Jmp);
+ g_Jmp.pvSavedStack = R0PtrSaved;
+ memset((void *)g_Jmp.pvSavedStack, '\0', VMM_STACK_SIZE);
+ g_cbFoo = 0;
+ g_cJmps = 0;
+ g_cbFooUsed = 0;
+
+ for (int i = iFrom, iItr = 0; i != iTo; i += iInc, iItr++)
+ {
+ int rc = stackRandom(&g_Jmp, (PFNVMMR0SETJMP)(uintptr_t)tst2, (PVM)(uintptr_t)i, 0);
+ RTTESTI_CHECK_MSG_RETV(rc == 0 || rc == 42, ("i=%d rc=%d setjmp; cbFoo=%#x cbFooUsed=%#x\n", i, rc, g_cbFoo, g_cbFooUsed));
+
+#ifdef VMM_R0_SWITCH_STACK
+ /* Make the stack pointer slide for the second half of the calls. */
+ if (iItr >= cIterations / 2)
+ {
+ /* Note! gcc does funny rounding up of alloca(). */
+ void *pv2 = alloca((i % 63) | 1);
+ size_t cb2 = (uintptr_t)pvPrev - (uintptr_t)pv2;
+ RTTESTI_CHECK_MSG(cb2 >= 16 && cb2 <= 128, ("cb2=%zu pv2=%p pvPrev=%p iAlloca=%d\n", cb2, pv2, pvPrev, iItr));
+ memset(pv2, 0xff, cb2);
+ memset(pvPrev, 0xee, 1);
+ pvPrev = pv2;
+ }
+#endif
+ }
+ RTTESTI_CHECK_MSG_RETV(g_cJmps, ("No jumps!"));
+ if (g_Jmp.cbUsedAvg || g_Jmp.cUsedTotal)
+ RTTestIPrintf(RTTESTLVL_ALWAYS, "cbUsedAvg=%#x cbUsedMax=%#x cUsedTotal=%#llx\n",
+ g_Jmp.cbUsedAvg, g_Jmp.cbUsedMax, g_Jmp.cUsedTotal);
+}
+
+
+int main()
+{
+ /*
+ * Init.
+ */
+ RTTEST hTest;
+ RTEXITCODE rcExit = RTTestInitAndCreate("tstVMMR0CallHost-1", &hTest);
+ if (rcExit != RTEXITCODE_SUCCESS)
+ return rcExit;
+ RTTestBanner(hTest);
+
+ g_Jmp.pvSavedStack = (RTR0PTR)RTTestGuardedAllocTail(hTest, VMM_STACK_SIZE);
+
+ /*
+     * Run two tests with about 1000 long jumps each.
+ */
+ RTTestSub(hTest, "Increasing stack usage");
+ tst(0, 7000, 1);
+ RTTestSub(hTest, "Decreasing stack usage");
+ tst(7599, 0, -1);
+
+ return RTTestSummaryAndDestroy(hTest);
+}
diff --git a/src/VBox/VMM/testcase/tstVMREQ.cpp b/src/VBox/VMM/testcase/tstVMREQ.cpp
new file mode 100644
index 00000000..6b91af05
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstVMREQ.cpp
@@ -0,0 +1,344 @@
+/* $Id: tstVMREQ.cpp $ */
+/** @file
+ * VMM Testcase.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/err.h>
+#include <VBox/log.h>
+#include <iprt/assert.h>
+#include <iprt/initterm.h>
+#include <iprt/semaphore.h>
+#include <iprt/stream.h>
+#include <iprt/string.h>
+#include <iprt/thread.h>
+#include <iprt/time.h>
+
+
+/*********************************************************************************************************************************
+* Defined Constants And Macros *
+*********************************************************************************************************************************/
+#define TESTCASE "tstVMREQ"
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+/** the error count. */
+static int g_cErrors = 0;
+
+
+/**
+ * Tests va_list passing in VMSetRuntimeError.
+ */
+static DECLCALLBACK(void) MyAtRuntimeError(PUVM pUVM, void *pvUser, uint32_t fFlags, const char *pszErrorId, const char *pszFormat, va_list va)
+{
+ NOREF(pUVM);
+ if (strcmp((const char *)pvUser, "user argument"))
+ {
+ RTPrintf(TESTCASE ": pvUser=%p:{%s}!\n", pvUser, (const char *)pvUser);
+ g_cErrors++;
+ }
+ if (fFlags)
+ {
+ RTPrintf(TESTCASE ": fFlags=%#x!\n", fFlags);
+ g_cErrors++;
+ }
+ if (strcmp(pszErrorId, "enum"))
+ {
+ RTPrintf(TESTCASE ": pszErrorId=%p:{%s}!\n", pszErrorId, pszErrorId);
+ g_cErrors++;
+ }
+ if (strcmp(pszFormat, "some %s string"))
+ {
+ RTPrintf(TESTCASE ": pszFormat=%p:{%s}!\n", pszFormat, pszFormat);
+ g_cErrors++;
+ }
+
+ char szBuf[1024];
+ RTStrPrintfV(szBuf, sizeof(szBuf), pszFormat, va);
+ if (strcmp(szBuf, "some error string"))
+ {
+ RTPrintf(TESTCASE ": RTStrPrintfV -> '%s'!\n", szBuf);
+ g_cErrors++;
+ }
+}
+
+
+/**
+ * The function PassVA and PassVA2 calls.
+ */
+static DECLCALLBACK(int) PassVACallback(PUVM pUVM, unsigned u4K, unsigned u1G, const char *pszFormat, va_list *pva)
+{
+ NOREF(pUVM);
+ if (u4K != _4K)
+ {
+ RTPrintf(TESTCASE ": u4K=%#x!\n", u4K);
+ g_cErrors++;
+ }
+ if (u1G != _1G)
+ {
+ RTPrintf(TESTCASE ": u1G=%#x!\n", u1G);
+ g_cErrors++;
+ }
+
+ if (strcmp(pszFormat, "hello %s"))
+ {
+ RTPrintf(TESTCASE ": pszFormat=%p:{%s}!\n", pszFormat, pszFormat);
+ g_cErrors++;
+ }
+
+ char szBuf[1024];
+ RTStrPrintfV(szBuf, sizeof(szBuf), pszFormat, *pva);
+ if (strcmp(szBuf, "hello world"))
+ {
+ RTPrintf(TESTCASE ": RTStrPrintfV -> '%s'!\n", szBuf);
+ g_cErrors++;
+ }
+
+ return VINF_SUCCESS;
+}
+
+
+/**
+ * Functions that tests passing a va_list * argument in a request,
+ * similar to VMSetRuntimeError.
+ */
+static void PassVA2(PUVM pUVM, const char *pszFormat, va_list va)
+{
+#if 0 /** @todo test if this is a GCC problem only or also happens with AMD64+VCC80... */
+ void *pvVA = &va;
+#else
+ va_list va2;
+ va_copy(va2, va);
+ void *pvVA = &va2;
+#endif
+
+ int rc = VMR3ReqCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)PassVACallback, 5, pUVM, _4K, _1G, pszFormat, pvVA);
+ NOREF(rc);
+
+#if 1
+ va_end(va2);
+#endif
+}
+
+
+/**
+ * Functions that tests passing a va_list * argument in a request,
+ * similar to VMSetRuntimeError.
+ */
+static void PassVA(PUVM pUVM, const char *pszFormat, ...)
+{
+ /* 1st test */
+ va_list va1;
+ va_start(va1, pszFormat);
+ int rc = VMR3ReqCallWaitU(pUVM, VMCPUID_ANY, (PFNRT)PassVACallback, 5, pUVM, _4K, _1G, pszFormat, &va1);
+ va_end(va1);
+ NOREF(rc);
+
+ /* 2nd test */
+ va_list va2;
+ va_start(va2, pszFormat);
+ PassVA2(pUVM, pszFormat, va2);
+ va_end(va2);
+}
+
+
+/**
+ * Thread function which allocates and frees requests like wildfire.
+ */
+static DECLCALLBACK(int) Thread(RTTHREAD hThreadSelf, void *pvUser)
+{
+ int rc = VINF_SUCCESS;
+ PUVM pUVM = (PUVM)pvUser;
+ NOREF(hThreadSelf);
+
+ for (unsigned i = 0; i < 100000; i++)
+ {
+ PVMREQ apReq[17];
+ const unsigned cReqs = i % RT_ELEMENTS(apReq);
+ unsigned iReq;
+ for (iReq = 0; iReq < cReqs; iReq++)
+ {
+ rc = VMR3ReqAlloc(pUVM, &apReq[iReq], VMREQTYPE_INTERNAL, VMCPUID_ANY);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf(TESTCASE ": i=%d iReq=%d cReqs=%d rc=%Rrc (alloc)\n", i, iReq, cReqs, rc);
+ return rc;
+ }
+ apReq[iReq]->iStatus = iReq + i;
+ }
+
+ for (iReq = 0; iReq < cReqs; iReq++)
+ {
+ if (apReq[iReq]->iStatus != (int)(iReq + i))
+ {
+ RTPrintf(TESTCASE ": i=%d iReq=%d cReqs=%d: iStatus=%d != %d\n", i, iReq, cReqs, apReq[iReq]->iStatus, iReq + i);
+ return VERR_GENERAL_FAILURE;
+ }
+ rc = VMR3ReqFree(apReq[iReq]);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf(TESTCASE ": i=%d iReq=%d cReqs=%d rc=%Rrc (free)\n", i, iReq, cReqs, rc);
+ return rc;
+ }
+ }
+ //if (!(i % 10000))
+ // RTPrintf(TESTCASE ": i=%d\n", i);
+ }
+
+ return VINF_SUCCESS;
+}
+
+static DECLCALLBACK(int)
+tstVMREQConfigConstructor(PUVM pUVM, PVM pVM, void *pvUser)
+{
+ RT_NOREF2(pUVM, pvUser);
+ return CFGMR3ConstructDefaultTree(pVM);
+}
+
+/**
+ * Entry point.
+ */
+extern "C" DECLEXPORT(int) TrustedMain(int argc, char **argv, char **envp)
+{
+ RT_NOREF1(envp);
+ RTR3InitExe(argc, &argv, RTR3INIT_FLAGS_SUPLIB);
+ RTPrintf(TESTCASE ": TESTING...\n");
+ RTStrmFlush(g_pStdOut);
+
+ /*
+ * Create empty VM.
+ */
+ PUVM pUVM;
+ int rc = VMR3Create(1, NULL, NULL, NULL, tstVMREQConfigConstructor, NULL, NULL, &pUVM);
+ if (RT_SUCCESS(rc))
+ {
+ /*
+ * Do testing.
+ */
+ uint64_t u64StartTS = RTTimeNanoTS();
+ RTTHREAD Thread0;
+ rc = RTThreadCreate(&Thread0, Thread, pUVM, 0, RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "REQ1");
+ if (RT_SUCCESS(rc))
+ {
+ RTTHREAD Thread1;
+ rc = RTThreadCreate(&Thread1, Thread, pUVM, 0, RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "REQ1");
+ if (RT_SUCCESS(rc))
+ {
+ int rcThread1;
+ rc = RTThreadWait(Thread1, RT_INDEFINITE_WAIT, &rcThread1);
+ if (RT_FAILURE(rc))
+ {
+ RTPrintf(TESTCASE ": RTThreadWait(Thread1,,) failed, rc=%Rrc\n", rc);
+ g_cErrors++;
+ }
+ if (RT_FAILURE(rcThread1))
+ g_cErrors++;
+ }
+ else
+ {
+ RTPrintf(TESTCASE ": RTThreadCreate(&Thread1,,,,) failed, rc=%Rrc\n", rc);
+ g_cErrors++;
+ }
+
+ int rcThread0;
+ rc = RTThreadWait(Thread0, RT_INDEFINITE_WAIT, &rcThread0);
+ if (RT_FAILURE(rc))
+ {
+                RTPrintf(TESTCASE ": RTThreadWait(Thread0,,) failed, rc=%Rrc\n", rc);
+ g_cErrors++;
+ }
+ if (RT_FAILURE(rcThread0))
+ g_cErrors++;
+ }
+ else
+ {
+ RTPrintf(TESTCASE ": RTThreadCreate(&Thread0,,,,) failed, rc=%Rrc\n", rc);
+ g_cErrors++;
+ }
+ uint64_t u64ElapsedTS = RTTimeNanoTS() - u64StartTS;
+ RTPrintf(TESTCASE ": %llu ns elapsed\n", u64ElapsedTS);
+ RTStrmFlush(g_pStdOut);
+
+ /*
+ * Print stats.
+ */
+ STAMR3Print(pUVM, "/VM/Req/*");
+
+ /*
+ * Testing va_list fun.
+ */
+ RTPrintf(TESTCASE ": va_list argument test...\n"); RTStrmFlush(g_pStdOut);
+ PassVA(pUVM, "hello %s", "world");
+ VMR3AtRuntimeErrorRegister(pUVM, MyAtRuntimeError, (void *)"user argument");
+ VMSetRuntimeError(VMR3GetVM(pUVM), 0 /*fFlags*/, "enum", "some %s string", "error");
+
+ /*
+ * Cleanup.
+ */
+ rc = VMR3PowerOff(pUVM);
+ if (!RT_SUCCESS(rc))
+ {
+ RTPrintf(TESTCASE ": error: failed to power off vm! rc=%Rrc\n", rc);
+ g_cErrors++;
+ }
+ rc = VMR3Destroy(pUVM);
+ if (!RT_SUCCESS(rc))
+ {
+ RTPrintf(TESTCASE ": error: failed to destroy vm! rc=%Rrc\n", rc);
+ g_cErrors++;
+ }
+ VMR3ReleaseUVM(pUVM);
+ }
+ else if (rc == VERR_SVM_NO_SVM || rc == VERR_VMX_NO_VMX)
+ {
+ RTPrintf(TESTCASE ": Skipped: %Rrc\n", rc);
+ return RTEXITCODE_SKIPPED;
+ }
+ else
+ {
+ RTPrintf(TESTCASE ": fatal error: failed to create vm! rc=%Rrc\n", rc);
+ g_cErrors++;
+ }
+
+ /*
+ * Summary and return.
+ */
+ if (!g_cErrors)
+ RTPrintf(TESTCASE ": SUCCESS\n");
+ else
+ RTPrintf(TESTCASE ": FAILURE - %d errors\n", g_cErrors);
+
+ return !!g_cErrors;
+}
+
+
+#if !defined(VBOX_WITH_HARDENING) || !defined(RT_OS_WINDOWS)
+/**
+ * Main entry point.
+ */
+int main(int argc, char **argv, char **envp)
+{
+ return TrustedMain(argc, argv, envp);
+}
+#endif
+
diff --git a/src/VBox/VMM/testcase/tstVMStruct.h b/src/VBox/VMM/testcase/tstVMStruct.h
new file mode 100644
index 00000000..60920488
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstVMStruct.h
@@ -0,0 +1,1494 @@
+/* $Id: tstVMStruct.h $ */
+/** @file
+ * tstVMStruct - Statements for generating VM and VMCPU offset and size tests.
+ *
+ * This is used by tstVMStructRC.cpp and tstVMStructDTrace.cpp. Tests that
+ * are not yet available in DTrace are blocked by VBOX_FOR_DTRACE_LIB.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+#ifndef VBOX_FOR_DTRACE_LIB
+ GEN_CHECK_SIZE(CFGM);
+#endif
+
+ GEN_CHECK_SIZE(X86CPUIDFEATECX);
+
+ GEN_CHECK_SIZE(CPUM); // has .mac
+ GEN_CHECK_OFF(CPUM, fHostUseFlags);
+ GEN_CHECK_OFF(CPUM, CR4);
+#ifndef VBOX_FOR_DTRACE_LIB
+ GEN_CHECK_OFF(CPUM, u8PortableCpuIdLevel);
+ GEN_CHECK_OFF(CPUM, fPendingRestore);
+#endif
+ GEN_CHECK_OFF(CPUM, aGuestCpuIdPatmStd);
+ GEN_CHECK_OFF(CPUM, aGuestCpuIdPatmExt);
+ GEN_CHECK_OFF(CPUM, aGuestCpuIdPatmCentaur);
+
+ GEN_CHECK_SIZE(CPUMCPU); // has .mac
+ GEN_CHECK_OFF(CPUMCPU, Hyper);
+ GEN_CHECK_OFF(CPUMCPU, Host);
+#ifdef VBOX_WITH_CRASHDUMP_MAGIC
+ GEN_CHECK_OFF(CPUMCPU, aMagic);
+ GEN_CHECK_OFF(CPUMCPU, uMagic);
+#endif
+ GEN_CHECK_OFF(CPUMCPU, Guest);
+ GEN_CHECK_OFF(CPUMCPU, GuestMsrs);
+ GEN_CHECK_OFF(CPUMCPU, fUseFlags);
+ GEN_CHECK_OFF(CPUMCPU, fChanged);
+ GEN_CHECK_OFF(CPUMCPU, u32RetCode);
+#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
+ GEN_CHECK_OFF(CPUMCPU, pvApicBase);
+ GEN_CHECK_OFF(CPUMCPU, fApicDisVectors);
+ GEN_CHECK_OFF(CPUMCPU, fX2Apic);
+#endif
+ GEN_CHECK_OFF(CPUMCPU, fRemEntered);
+
+ GEN_CHECK_SIZE(CPUMHOSTCTX);
+ GEN_CHECK_OFF(CPUMHOSTCTX, pXStateR3);
+ GEN_CHECK_OFF(CPUMHOSTCTX, pXStateR0);
+#if HC_ARCH_BITS == 64
+ GEN_CHECK_OFF(CPUMHOSTCTX, rbx);
+ GEN_CHECK_OFF(CPUMHOSTCTX, rdi);
+ GEN_CHECK_OFF(CPUMHOSTCTX, rsi);
+ GEN_CHECK_OFF(CPUMHOSTCTX, rbp);
+ GEN_CHECK_OFF(CPUMHOSTCTX, rsp);
+ GEN_CHECK_OFF(CPUMHOSTCTX, r10);
+ GEN_CHECK_OFF(CPUMHOSTCTX, r11);
+ GEN_CHECK_OFF(CPUMHOSTCTX, r12);
+ GEN_CHECK_OFF(CPUMHOSTCTX, r13);
+ GEN_CHECK_OFF(CPUMHOSTCTX, r14);
+ GEN_CHECK_OFF(CPUMHOSTCTX, r15);
+ GEN_CHECK_OFF(CPUMHOSTCTX, rflags);
+#endif
+#if HC_ARCH_BITS == 32
+ GEN_CHECK_OFF(CPUMHOSTCTX, ebx);
+ GEN_CHECK_OFF(CPUMHOSTCTX, edi);
+ GEN_CHECK_OFF(CPUMHOSTCTX, esi);
+ GEN_CHECK_OFF(CPUMHOSTCTX, ebp);
+ GEN_CHECK_OFF(CPUMHOSTCTX, eflags);
+ GEN_CHECK_OFF(CPUMHOSTCTX, esp);
+#endif
+ GEN_CHECK_OFF(CPUMHOSTCTX, ss);
+ GEN_CHECK_OFF(CPUMHOSTCTX, gs);
+ GEN_CHECK_OFF(CPUMHOSTCTX, fs);
+ GEN_CHECK_OFF(CPUMHOSTCTX, es);
+ GEN_CHECK_OFF(CPUMHOSTCTX, ds);
+ GEN_CHECK_OFF(CPUMHOSTCTX, cs);
+#if HC_ARCH_BITS == 32
+ GEN_CHECK_OFF(CPUMHOSTCTX, cr0);
+ GEN_CHECK_OFF(CPUMHOSTCTX, cr3);
+ GEN_CHECK_OFF(CPUMHOSTCTX, cr4);
+ GEN_CHECK_OFF(CPUMHOSTCTX, dr0);
+ GEN_CHECK_OFF(CPUMHOSTCTX, dr1);
+ GEN_CHECK_OFF(CPUMHOSTCTX, dr2);
+ GEN_CHECK_OFF(CPUMHOSTCTX, dr3);
+ GEN_CHECK_OFF(CPUMHOSTCTX, dr6);
+ GEN_CHECK_OFF(CPUMHOSTCTX, dr7);
+ GEN_CHECK_OFF(CPUMHOSTCTX, gdtr);
+ GEN_CHECK_OFF(CPUMHOSTCTX, idtr);
+ GEN_CHECK_OFF(CPUMHOSTCTX, ldtr);
+ GEN_CHECK_OFF(CPUMHOSTCTX, tr);
+ GEN_CHECK_OFF(CPUMHOSTCTX, SysEnter);
+ GEN_CHECK_OFF(CPUMHOSTCTX, efer);
+#elif HC_ARCH_BITS == 64
+ GEN_CHECK_OFF(CPUMHOSTCTX, cr0);
+ GEN_CHECK_OFF(CPUMHOSTCTX, cr3);
+ GEN_CHECK_OFF(CPUMHOSTCTX, cr4);
+ GEN_CHECK_OFF(CPUMHOSTCTX, cr8);
+ GEN_CHECK_OFF(CPUMHOSTCTX, dr0);
+ GEN_CHECK_OFF(CPUMHOSTCTX, dr1);
+ GEN_CHECK_OFF(CPUMHOSTCTX, dr2);
+ GEN_CHECK_OFF(CPUMHOSTCTX, dr3);
+ GEN_CHECK_OFF(CPUMHOSTCTX, dr6);
+ GEN_CHECK_OFF(CPUMHOSTCTX, dr7);
+ GEN_CHECK_OFF(CPUMHOSTCTX, gdtr);
+ GEN_CHECK_OFF(CPUMHOSTCTX, idtr);
+ GEN_CHECK_OFF(CPUMHOSTCTX, ldtr);
+ GEN_CHECK_OFF(CPUMHOSTCTX, tr);
+ GEN_CHECK_OFF(CPUMHOSTCTX, SysEnter);
+ GEN_CHECK_OFF(CPUMHOSTCTX, FSbase);
+ GEN_CHECK_OFF(CPUMHOSTCTX, GSbase);
+ GEN_CHECK_OFF(CPUMHOSTCTX, efer);
+#else
+# error HC_ARCH_BITS not defined
+#endif
+
+ GEN_CHECK_SIZE(CPUMCTX);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.uMsrHSavePa);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.GCPhysVmcb);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pVmcbR0);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pVmcbR3);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.HostState);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.uPrevPauseTick);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.cPauseFilter);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.cPauseFilterThreshold);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.fInterceptEvents);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pvMsrBitmapR0);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pvMsrBitmapR3);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pvIoBitmapR0);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.pvIoBitmapR3);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.svm.HCPhysVmcb);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.GCPhysVmxon);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.GCPhysVmcs);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.enmDiag);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.enmAbort);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.uDiagAux);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.uAbortAux);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.fInVmxRootMode);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.fInVmxNonRootMode);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.fInterceptEvents);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.fNmiUnblockingIret);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pVmcsR0);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pVmcsR3);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pShadowVmcsR0);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pShadowVmcsR3);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVirtApicPageR0);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVirtApicPageR3);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVmreadBitmapR0);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVmreadBitmapR3);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVmwriteBitmapR0);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvVmwriteBitmapR3);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pEntryMsrLoadAreaR0);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pEntryMsrLoadAreaR3);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pExitMsrStoreAreaR0);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pExitMsrStoreAreaR3);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pExitMsrLoadAreaR0);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pExitMsrLoadAreaR3);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvMsrBitmapR0);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvMsrBitmapR3);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvIoBitmapR0);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.pvIoBitmapR3);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.uFirstPauseLoopTick);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.uPrevPauseTick);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.uEntryTick);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.offVirtApicWrite);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.fVirtNmiBlocking);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.Msrs);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysVmcs);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysShadowVmcs);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysVmreadBitmap);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysVmwriteBitmap);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysEntryMsrLoadArea);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysExitMsrStoreArea);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysExitMsrLoadArea);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysMsrBitmap);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.vmx.HCPhysIoBitmap);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.enmHwvirt);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.fGif);
+ GEN_CHECK_OFF(CPUMCTX, hwvirt.fLocalForcedActions);
+ /** @todo NSTVMX: add rest of hwvirt fields when code is more
+ * finalized. */
+ GEN_CHECK_OFF(CPUMCTX, pXStateR0);
+ GEN_CHECK_OFF(CPUMCTX, pXStateR3);
+ GEN_CHECK_OFF(CPUMCTX, rdi);
+ GEN_CHECK_OFF(CPUMCTX, rsi);
+ GEN_CHECK_OFF(CPUMCTX, rbp);
+ GEN_CHECK_OFF(CPUMCTX, rax);
+ GEN_CHECK_OFF(CPUMCTX, rbx);
+ GEN_CHECK_OFF(CPUMCTX, rdx);
+ GEN_CHECK_OFF(CPUMCTX, rcx);
+ GEN_CHECK_OFF(CPUMCTX, rsp);
+ GEN_CHECK_OFF(CPUMCTX, es);
+#ifndef VBOX_FOR_DTRACE_LIB
+ GEN_CHECK_OFF(CPUMCTX, es.Sel);
+ GEN_CHECK_OFF(CPUMCTX, es.ValidSel);
+ GEN_CHECK_OFF(CPUMCTX, es.fFlags);
+ GEN_CHECK_OFF(CPUMCTX, es.u64Base);
+ GEN_CHECK_OFF(CPUMCTX, es.u32Limit);
+ GEN_CHECK_OFF(CPUMCTX, es.Attr);
+#endif
+ GEN_CHECK_OFF(CPUMCTX, cs);
+ GEN_CHECK_OFF(CPUMCTX, ss);
+ GEN_CHECK_OFF(CPUMCTX, ds);
+ GEN_CHECK_OFF(CPUMCTX, fs);
+ GEN_CHECK_OFF(CPUMCTX, gs);
+ GEN_CHECK_OFF(CPUMCTX, rflags);
+ GEN_CHECK_OFF(CPUMCTX, rip);
+ GEN_CHECK_OFF(CPUMCTX, r8);
+ GEN_CHECK_OFF(CPUMCTX, r9);
+ GEN_CHECK_OFF(CPUMCTX, r10);
+ GEN_CHECK_OFF(CPUMCTX, r11);
+ GEN_CHECK_OFF(CPUMCTX, r12);
+ GEN_CHECK_OFF(CPUMCTX, r13);
+ GEN_CHECK_OFF(CPUMCTX, r14);
+ GEN_CHECK_OFF(CPUMCTX, r15);
+ GEN_CHECK_OFF(CPUMCTX, cr0);
+ GEN_CHECK_OFF(CPUMCTX, cr2);
+ GEN_CHECK_OFF(CPUMCTX, cr3);
+ GEN_CHECK_OFF(CPUMCTX, cr4);
+ GEN_CHECK_OFF(CPUMCTX, dr);
+ GEN_CHECK_OFF(CPUMCTX, gdtr);
+ GEN_CHECK_OFF(CPUMCTX, idtr);
+ GEN_CHECK_OFF(CPUMCTX, ldtr);
+ GEN_CHECK_OFF(CPUMCTX, tr);
+ GEN_CHECK_OFF(CPUMCTX, SysEnter);
+ GEN_CHECK_OFF(CPUMCTX, msrEFER);
+ GEN_CHECK_OFF(CPUMCTX, msrSTAR);
+ GEN_CHECK_OFF(CPUMCTX, msrPAT);
+ GEN_CHECK_OFF(CPUMCTX, msrLSTAR);
+ GEN_CHECK_OFF(CPUMCTX, msrCSTAR);
+ GEN_CHECK_OFF(CPUMCTX, msrSFMASK);
+ GEN_CHECK_OFF(CPUMCTX, msrKERNELGSBASE);
+ GEN_CHECK_OFF(CPUMCTX, ldtr);
+ GEN_CHECK_OFF(CPUMCTX, tr);
+#ifndef VBOX_FOR_DTRACE_LIB
+ GEN_CHECK_OFF(CPUMCTX, tr.Sel);
+ GEN_CHECK_OFF(CPUMCTX, tr.ValidSel);
+ GEN_CHECK_OFF(CPUMCTX, tr.fFlags);
+ GEN_CHECK_OFF(CPUMCTX, tr.u64Base);
+ GEN_CHECK_OFF(CPUMCTX, tr.u32Limit);
+ GEN_CHECK_OFF(CPUMCTX, tr.Attr);
+#endif
+
+ GEN_CHECK_SIZE(CPUMCTXMSRS);
+ GEN_CHECK_SIZE(CPUMCTXCORE);
+
+#ifndef VBOX_FOR_DTRACE_LIB
+ GEN_CHECK_SIZE(STAMRATIOU32);
+ GEN_CHECK_SIZE(AVLOHCPHYSNODECORE);
+ GEN_CHECK_SIZE(AVLOGCPHYSNODECORE);
+ GEN_CHECK_SIZE(AVLROGCPHYSNODECORE);
+ GEN_CHECK_SIZE(AVLOGCPTRNODECORE);
+ GEN_CHECK_SIZE(AVLROGCPTRNODECORE);
+ GEN_CHECK_SIZE(AVLOIOPORTNODECORE);
+ GEN_CHECK_SIZE(AVLROIOPORTNODECORE);
+
+ GEN_CHECK_SIZE(DBGF);
+ GEN_CHECK_OFF(DBGF, bmHardIntBreakpoints);
+ GEN_CHECK_OFF(DBGF, bmSoftIntBreakpoints);
+ GEN_CHECK_OFF(DBGF, bmSelectedEvents);
+ GEN_CHECK_OFF(DBGF, cHardIntBreakpoints);
+ GEN_CHECK_OFF(DBGF, cSoftIntBreakpoints);
+ GEN_CHECK_OFF(DBGF, cSelectedEvents);
+ GEN_CHECK_OFF(DBGF, fAttached);
+ GEN_CHECK_OFF(DBGF, fStoppedInHyper);
+ GEN_CHECK_OFF(DBGF, PingPong);
+ GEN_CHECK_OFF(DBGF, DbgEvent);
+ GEN_CHECK_OFF(DBGF, enmVMMCmd);
+ GEN_CHECK_OFF(DBGF, VMMCmdData);
+ //GEN_CHECK_OFF(DBGF, pInfoFirst);
+ //GEN_CHECK_OFF(DBGF, InfoCritSect);
+ GEN_CHECK_OFF(DBGF, cEnabledHwBreakpoints);
+ GEN_CHECK_OFF(DBGF, cEnabledHwIoBreakpoints);
+ GEN_CHECK_OFF(DBGF, aHwBreakpoints);
+ GEN_CHECK_OFF(DBGF, aBreakpoints);
+ GEN_CHECK_OFF(DBGF, Mmio);
+ GEN_CHECK_OFF(DBGF, PortIo);
+ GEN_CHECK_OFF(DBGF, Int3);
+ //GEN_CHECK_OFF(DBGF, hAsDbLock);
+ //GEN_CHECK_OFF(DBGF, hRegDbLock);
+ //GEN_CHECK_OFF(DBGF, RegSetSpace);
+ //GEN_CHECK_OFF(DBGF, pCurOS);
+ GEN_CHECK_SIZE(DBGFEVENT);
+
+ GEN_CHECK_SIZE(DBGFCPU);
+ GEN_CHECK_OFF(DBGFCPU, iActiveBp);
+ GEN_CHECK_OFF(DBGFCPU, fSingleSteppingRaw);
+ GEN_CHECK_OFF(DBGFCPU, cEvents);
+ GEN_CHECK_OFF(DBGFCPU, aEvents);
+ GEN_CHECK_OFF(DBGFCPU, aEvents[1]);
+ GEN_CHECK_OFF(DBGFCPU, aEvents[1].Event);
+ GEN_CHECK_OFF(DBGFCPU, aEvents[1].Event.enmCtx);
+ GEN_CHECK_OFF(DBGFCPU, aEvents[1].Event.enmType);
+ GEN_CHECK_OFF(DBGFCPU, aEvents[1].Event.u.Bp.iBp);
+ GEN_CHECK_OFF(DBGFCPU, aEvents[1].rip);
+ GEN_CHECK_OFF(DBGFCPU, aEvents[1].enmState);
+ //GEN_CHECK_OFF(DBGFCPU, pGuestRegSet);
+ //GEN_CHECK_OFF(DBGFCPU, pHyperRegSet);
+
+ GEN_CHECK_SIZE(EM);
+ GEN_CHECK_OFF(EM, offVM);
+ GEN_CHECK_OFF(EMCPU, enmState);
+ GEN_CHECK_OFF_DOT(EMCPU, u.achPaddingFatalLongJump);
+ GEN_CHECK_OFF(EMCPU, DisState);
+ GEN_CHECK_OFF(EMCPU, StatForcedActions);
+ GEN_CHECK_OFF(EMCPU, StatTotalClis);
+ GEN_CHECK_OFF(EMCPU, pStatsR3);
+ GEN_CHECK_OFF(EMCPU, pStatsR0);
+ GEN_CHECK_OFF(EMCPU, pStatsRC);
+ GEN_CHECK_OFF(EMCPU, pCliStatTree);
+ GEN_CHECK_OFF(EMCPU, PendingIoPortAccess);
+ GEN_CHECK_OFF_DOT(EMCPU, PendingIoPortAccess.uPort);
+ GEN_CHECK_OFF_DOT(EMCPU, PendingIoPortAccess.cbValue);
+ GEN_CHECK_OFF_DOT(EMCPU, PendingIoPortAccess.uValue);
+ GEN_CHECK_OFF(EMCPU, MWait);
+ GEN_CHECK_OFF_DOT(EMCPU, MWait.fWait);
+ GEN_CHECK_OFF_DOT(EMCPU, MWait.uMWaitRAX);
+ GEN_CHECK_OFF_DOT(EMCPU, MWait.uMWaitRCX);
+ GEN_CHECK_OFF_DOT(EMCPU, MWait.uMonitorRAX);
+ GEN_CHECK_OFF_DOT(EMCPU, MWait.uMonitorRCX);
+ GEN_CHECK_OFF_DOT(EMCPU, MWait.uMonitorRDX);
+
+ GEN_CHECK_SIZE(IEMCPU);
+ GEN_CHECK_OFF(IEMCPU, enmCpuMode);
+ GEN_CHECK_OFF(IEMCPU, fPrefixes);
+ GEN_CHECK_OFF(IEMCPU, abOpcode);
+ GEN_CHECK_OFF(IEMCPU, cActiveMappings);
+ GEN_CHECK_OFF(IEMCPU, iNextMapping);
+ GEN_CHECK_OFF(IEMCPU, aMemMappings);
+ GEN_CHECK_OFF(IEMCPU, aMemMappings[1]);
+ GEN_CHECK_OFF(IEMCPU, aBounceBuffers);
+ GEN_CHECK_OFF(IEMCPU, aBounceBuffers[1]);
+ GEN_CHECK_OFF(IEMCPU, aMemBbMappings);
+ GEN_CHECK_OFF(IEMCPU, aMemBbMappings[1]);
+ GEN_CHECK_OFF(IEMCPU, cLogRelRdMsr);
+ GEN_CHECK_OFF(IEMCPU, cLogRelWrMsr);
+ GEN_CHECK_OFF(IEMCPU, DataTlb);
+ GEN_CHECK_OFF(IEMCPU, CodeTlb);
+
+ GEN_CHECK_SIZE(IOM);
+ GEN_CHECK_OFF(IOM, pTreesRC);
+ GEN_CHECK_OFF(IOM, pTreesR3);
+ GEN_CHECK_OFF(IOM, pTreesR0);
+
+ GEN_CHECK_SIZE(IOMCPU);
+ GEN_CHECK_OFF(IOMCPU, DisState);
+ GEN_CHECK_OFF(IOMCPU, PendingIOPortWrite);
+ GEN_CHECK_OFF(IOMCPU, PendingIOPortWrite.IOPort);
+ GEN_CHECK_OFF(IOMCPU, PendingIOPortWrite.u32Value);
+ GEN_CHECK_OFF(IOMCPU, PendingIOPortWrite.cbValue);
+ GEN_CHECK_OFF(IOMCPU, PendingMmioWrite);
+ GEN_CHECK_OFF(IOMCPU, PendingMmioWrite.GCPhys);
+ GEN_CHECK_OFF(IOMCPU, PendingMmioWrite.abValue);
+ GEN_CHECK_OFF(IOMCPU, PendingMmioWrite.cbValue);
+ GEN_CHECK_OFF(IOMCPU, pMMIORangeLastR3);
+ GEN_CHECK_OFF(IOMCPU, pMMIOStatsLastR3);
+ GEN_CHECK_OFF(IOMCPU, pMMIORangeLastR0);
+ GEN_CHECK_OFF(IOMCPU, pMMIOStatsLastR0);
+ GEN_CHECK_OFF(IOMCPU, pMMIORangeLastRC);
+ GEN_CHECK_OFF(IOMCPU, pMMIOStatsLastRC);
+ GEN_CHECK_OFF(IOMCPU, pRangeLastReadR0);
+ GEN_CHECK_OFF(IOMCPU, pRangeLastReadRC);
+
+ GEN_CHECK_SIZE(IOMMMIORANGE);
+ GEN_CHECK_OFF(IOMMMIORANGE, GCPhys);
+ GEN_CHECK_OFF(IOMMMIORANGE, cb);
+ GEN_CHECK_OFF(IOMMMIORANGE, cRefs);
+ GEN_CHECK_OFF(IOMMMIORANGE, fFlags);
+ GEN_CHECK_OFF(IOMMMIORANGE, pszDesc);
+ GEN_CHECK_OFF(IOMMMIORANGE, pvUserR3);
+ GEN_CHECK_OFF(IOMMMIORANGE, pDevInsR3);
+ GEN_CHECK_OFF(IOMMMIORANGE, pfnWriteCallbackR3);
+ GEN_CHECK_OFF(IOMMMIORANGE, pfnReadCallbackR3);
+ GEN_CHECK_OFF(IOMMMIORANGE, pfnFillCallbackR3);
+ GEN_CHECK_OFF(IOMMMIORANGE, pvUserR0);
+ GEN_CHECK_OFF(IOMMMIORANGE, pDevInsR0);
+ GEN_CHECK_OFF(IOMMMIORANGE, pfnWriteCallbackR0);
+ GEN_CHECK_OFF(IOMMMIORANGE, pfnReadCallbackR0);
+ GEN_CHECK_OFF(IOMMMIORANGE, pfnFillCallbackR0);
+ GEN_CHECK_OFF(IOMMMIORANGE, pvUserRC);
+ GEN_CHECK_OFF(IOMMMIORANGE, pDevInsRC);
+ GEN_CHECK_OFF(IOMMMIORANGE, pfnWriteCallbackRC);
+ GEN_CHECK_OFF(IOMMMIORANGE, pfnReadCallbackRC);
+ GEN_CHECK_OFF(IOMMMIORANGE, pfnFillCallbackRC);
+
+ GEN_CHECK_SIZE(IOMMMIOSTATS);
+ GEN_CHECK_OFF(IOMMMIOSTATS, Accesses);
+ GEN_CHECK_OFF(IOMMMIOSTATS, WriteRZToR3);
+
+ GEN_CHECK_SIZE(IOMIOPORTRANGER0);
+ GEN_CHECK_OFF(IOMIOPORTRANGER0, Port);
+ GEN_CHECK_OFF(IOMIOPORTRANGER0, cPorts);
+ GEN_CHECK_OFF(IOMIOPORTRANGER0, pvUser);
+ GEN_CHECK_OFF(IOMIOPORTRANGER0, pDevIns);
+ GEN_CHECK_OFF(IOMIOPORTRANGER0, pszDesc);
+
+ GEN_CHECK_SIZE(IOMIOPORTRANGERC);
+ GEN_CHECK_OFF(IOMIOPORTRANGERC, Port);
+ GEN_CHECK_OFF(IOMIOPORTRANGERC, cPorts);
+ GEN_CHECK_OFF(IOMIOPORTRANGERC, pvUser);
+ GEN_CHECK_OFF(IOMIOPORTRANGERC, pDevIns);
+ GEN_CHECK_OFF(IOMIOPORTRANGERC, pszDesc);
+
+ GEN_CHECK_SIZE(IOMIOPORTSTATS);
+ GEN_CHECK_OFF(IOMIOPORTSTATS, InR3);
+
+ GEN_CHECK_SIZE(IOMTREES);
+ GEN_CHECK_OFF(IOMTREES, IOPortTreeR3);
+ GEN_CHECK_OFF(IOMTREES, IOPortTreeR0);
+ GEN_CHECK_OFF(IOMTREES, IOPortTreeRC);
+ GEN_CHECK_OFF(IOMTREES, MMIOTree);
+ GEN_CHECK_OFF(IOMTREES, IOPortStatTree);
+ GEN_CHECK_OFF(IOMTREES, MmioStatTree);
+
+ GEN_CHECK_SIZE(MM);
+ GEN_CHECK_OFF(MM, offVM);
+ GEN_CHECK_OFF(MM, offHyperNextStatic);
+ GEN_CHECK_OFF(MM, cbHyperArea);
+ GEN_CHECK_OFF(MM, fDoneMMR3InitPaging);
+ GEN_CHECK_OFF(MM, fPGMInitialized);
+ GEN_CHECK_OFF(MM, offLookupHyper);
+ GEN_CHECK_OFF(MM, pHyperHeapRC);
+ GEN_CHECK_OFF(MM, pHyperHeapR3);
+ GEN_CHECK_OFF(MM, pHyperHeapR0);
+ GEN_CHECK_OFF(MM, pPagePoolR3);
+ GEN_CHECK_OFF(MM, pPagePoolLowR3);
+#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+ GEN_CHECK_OFF(MM, pPagePoolR0);
+ GEN_CHECK_OFF(MM, pPagePoolLowR0);
+#endif
+ GEN_CHECK_OFF(MM, pvDummyPage);
+ GEN_CHECK_OFF(MM, HCPhysDummyPage);
+ GEN_CHECK_OFF(MM, cbRamBase);
+ GEN_CHECK_OFF(MM, cBasePages);
+ GEN_CHECK_OFF(MM, cHandyPages);
+ GEN_CHECK_OFF(MM, cShadowPages);
+ GEN_CHECK_OFF(MM, cFixedPages);
+ GEN_CHECK_SIZE(MMHYPERSTAT);
+ GEN_CHECK_SIZE(MMHYPERCHUNK);
+ GEN_CHECK_SIZE(MMHYPERCHUNKFREE);
+ GEN_CHECK_SIZE(MMHYPERHEAP);
+ GEN_CHECK_OFF(MMHYPERHEAP, u32Magic);
+ GEN_CHECK_OFF(MMHYPERHEAP, cbHeap);
+ GEN_CHECK_OFF(MMHYPERHEAP, pbHeapR3);
+ GEN_CHECK_OFF(MMHYPERHEAP, pVMR3);
+ GEN_CHECK_OFF(MMHYPERHEAP, pbHeapR0);
+ GEN_CHECK_OFF(MMHYPERHEAP, pVMR0);
+ GEN_CHECK_OFF(MMHYPERHEAP, pbHeapRC);
+ GEN_CHECK_OFF(MMHYPERHEAP, pVMRC);
+ GEN_CHECK_OFF(MMHYPERHEAP, cbFree);
+ GEN_CHECK_OFF(MMHYPERHEAP, offFreeHead);
+ GEN_CHECK_OFF(MMHYPERHEAP, offFreeTail);
+ GEN_CHECK_OFF(MMHYPERHEAP, offPageAligned);
+ GEN_CHECK_OFF(MMHYPERHEAP, HyperHeapStatTree);
+ GEN_CHECK_SIZE(MMLOOKUPHYPER);
+ GEN_CHECK_OFF(MMLOOKUPHYPER, offNext);
+ GEN_CHECK_OFF(MMLOOKUPHYPER, off);
+ GEN_CHECK_OFF(MMLOOKUPHYPER, cb);
+ GEN_CHECK_OFF(MMLOOKUPHYPER, enmType);
+ GEN_CHECK_OFF_DOT(MMLOOKUPHYPER, u.Locked.pvR3);
+ GEN_CHECK_OFF_DOT(MMLOOKUPHYPER, u.Locked.pvR0);
+ GEN_CHECK_OFF_DOT(MMLOOKUPHYPER, u.Locked.paHCPhysPages);
+ GEN_CHECK_OFF_DOT(MMLOOKUPHYPER, u.HCPhys.pvR3);
+ GEN_CHECK_OFF_DOT(MMLOOKUPHYPER, u.HCPhys.HCPhys);
+ GEN_CHECK_OFF_DOT(MMLOOKUPHYPER, u.GCPhys.GCPhys);
+ GEN_CHECK_OFF_DOT(MMLOOKUPHYPER, u.MMIO2.pDevIns);
+ GEN_CHECK_OFF_DOT(MMLOOKUPHYPER, u.MMIO2.iSubDev);
+ GEN_CHECK_OFF_DOT(MMLOOKUPHYPER, u.MMIO2.iRegion);
+ GEN_CHECK_OFF_DOT(MMLOOKUPHYPER, u.MMIO2.off);
+ GEN_CHECK_OFF(MMLOOKUPHYPER, pszDesc);
+
+ GEN_CHECK_SIZE(NEM);
+ GEN_CHECK_SIZE(NEMCPU);
+
+ GEN_CHECK_SIZE(PDM);
+ GEN_CHECK_OFF(PDM, CritSect);
+ GEN_CHECK_OFF(PDM, NopCritSect);
+ GEN_CHECK_OFF(PDM, pDevs);
+ GEN_CHECK_OFF(PDM, pDevInstances);
+ GEN_CHECK_OFF(PDM, pUsbDevs);
+ GEN_CHECK_OFF(PDM, pUsbInstances);
+ GEN_CHECK_OFF(PDM, pDrvs);
+ GEN_CHECK_OFF(PDM, aPciBuses);
+ GEN_CHECK_OFF_DOT(PDM, aPciBuses[0].iBus);
+ GEN_CHECK_OFF_DOT(PDM, aPciBuses[0].pDevInsR3);
+ GEN_CHECK_OFF_DOT(PDM, aPciBuses[0].pfnSetIrqR3);
+ GEN_CHECK_OFF_DOT(PDM, aPciBuses[0].pfnRegisterR3);
+ GEN_CHECK_OFF_DOT(PDM, aPciBuses[0].pfnIORegionRegisterR3);
+ GEN_CHECK_OFF_DOT(PDM, aPciBuses[0].pDevInsR0);
+ GEN_CHECK_OFF_DOT(PDM, aPciBuses[0].pfnSetIrqR0);
+ GEN_CHECK_OFF_DOT(PDM, aPciBuses[0].pDevInsRC);
+ GEN_CHECK_OFF_DOT(PDM, aPciBuses[0].pfnSetIrqRC);
+ GEN_CHECK_OFF(PDM, Pic);
+ GEN_CHECK_OFF_DOT(PDM, Pic.pDevInsR3);
+ GEN_CHECK_OFF_DOT(PDM, Pic.pfnSetIrqR3);
+ GEN_CHECK_OFF_DOT(PDM, Pic.pfnGetInterruptR3);
+ GEN_CHECK_OFF_DOT(PDM, Pic.pDevInsR0);
+ GEN_CHECK_OFF_DOT(PDM, Pic.pfnSetIrqR0);
+ GEN_CHECK_OFF_DOT(PDM, Pic.pfnGetInterruptR0);
+ GEN_CHECK_OFF_DOT(PDM, Pic.pDevInsRC);
+ GEN_CHECK_OFF_DOT(PDM, Pic.pfnSetIrqRC);
+ GEN_CHECK_OFF_DOT(PDM, Pic.pfnGetInterruptRC);
+ GEN_CHECK_OFF(PDM, Apic);
+ GEN_CHECK_OFF_DOT(PDM, Apic.pDevInsR3);
+ GEN_CHECK_OFF_DOT(PDM, Apic.pDevInsR0);
+ GEN_CHECK_OFF_DOT(PDM, Apic.pDevInsRC);
+ GEN_CHECK_OFF(PDM, IoApic);
+ GEN_CHECK_OFF_DOT(PDM, IoApic.pDevInsR3);
+ GEN_CHECK_OFF_DOT(PDM, IoApic.pfnSetIrqR3);
+ GEN_CHECK_OFF_DOT(PDM, IoApic.pDevInsR0);
+ GEN_CHECK_OFF_DOT(PDM, IoApic.pfnSetIrqR0);
+ GEN_CHECK_OFF_DOT(PDM, IoApic.pDevInsRC);
+ GEN_CHECK_OFF_DOT(PDM, IoApic.pfnSetIrqRC);
+ GEN_CHECK_OFF(PDM, pDmac);
+ GEN_CHECK_OFF(PDM, pRtc);
+ GEN_CHECK_OFF(PDM, pUsbHubs);
+ GEN_CHECK_OFF(PDM, pDevHlpQueueR3);
+ GEN_CHECK_OFF(PDM, pDevHlpQueueR0);
+ GEN_CHECK_OFF(PDM, pDevHlpQueueRC);
+ GEN_CHECK_OFF(PDMCPU, cQueuedCritSectLeaves);
+ GEN_CHECK_OFF(PDMCPU, apQueuedCritSectLeaves);
+ GEN_CHECK_OFF(PDMCPU, cQueuedCritSectRwExclLeaves);
+ GEN_CHECK_OFF(PDMCPU, apQueuedCritSectRwExclLeaves);
+ GEN_CHECK_OFF(PDMCPU, cQueuedCritSectRwShrdLeaves);
+ GEN_CHECK_OFF(PDMCPU, apQueuedCritSectRwShrdLeaves);
+ GEN_CHECK_OFF(PDM, pQueueFlushR0);
+ GEN_CHECK_OFF(PDM, pQueueFlushRC);
+ GEN_CHECK_OFF(PDM, StatQueuedCritSectLeaves);
+
+ GEN_CHECK_SIZE(PDMDEVINSINT);
+ GEN_CHECK_OFF(PDMDEVINSINT, pNextR3);
+ GEN_CHECK_OFF(PDMDEVINSINT, pPerDeviceNextR3);
+ GEN_CHECK_OFF(PDMDEVINSINT, pDevR3);
+ GEN_CHECK_OFF(PDMDEVINSINT, pLunsR3);
+ GEN_CHECK_OFF(PDMDEVINSINT, pfnAsyncNotify);
+ GEN_CHECK_OFF(PDMDEVINSINT, pCfgHandle);
+ GEN_CHECK_OFF(PDMDEVINSINT, pVMR3);
+ GEN_CHECK_OFF(PDMDEVINSINT, pVMR0);
+ GEN_CHECK_OFF(PDMDEVINSINT, pVMRC);
+ GEN_CHECK_OFF(PDMDEVINSINT, pHeadPciDevR3);
+ GEN_CHECK_OFF(PDMDEVINSINT, pHeadPciDevR0);
+ GEN_CHECK_OFF(PDMDEVINSINT, pHeadPciDevRC);
+ GEN_CHECK_OFF(PDMDEVINSINT, fIntFlags);
+ GEN_CHECK_OFF(PDMDEVINSINT, uLastIrqTag);
+ GEN_CHECK_OFF(PDMDEVINS, u32Version);
+ GEN_CHECK_OFF(PDMDEVINS, iInstance);
+ GEN_CHECK_OFF(PDMDEVINS, pHlpRC);
+ GEN_CHECK_OFF(PDMDEVINS, pvInstanceDataRC);
+ GEN_CHECK_OFF(PDMDEVINS, pHlpR0);
+ GEN_CHECK_OFF(PDMDEVINS, pvInstanceDataR0);
+ GEN_CHECK_OFF(PDMDEVINS, pHlpR3);
+ GEN_CHECK_OFF(PDMDEVINS, pvInstanceDataR3);
+ GEN_CHECK_OFF(PDMDEVINS, pReg);
+ GEN_CHECK_OFF(PDMDEVINS, pCfg);
+ GEN_CHECK_OFF(PDMDEVINS, IBase);
+ GEN_CHECK_OFF(PDMDEVINS, Internal);
+ GEN_CHECK_OFF(PDMDEVINS, achInstanceData);
+
+ GEN_CHECK_SIZE(PDMDRVINSINT);
+ GEN_CHECK_OFF(PDMDRVINSINT, pUp);
+ GEN_CHECK_OFF(PDMDRVINSINT, pDown);
+ GEN_CHECK_OFF(PDMDRVINSINT, pLun);
+ GEN_CHECK_OFF(PDMDRVINSINT, pDrv);
+ GEN_CHECK_OFF(PDMDRVINSINT, pVMR3);
+ GEN_CHECK_OFF(PDMDRVINSINT, pVMR0);
+ GEN_CHECK_OFF(PDMDRVINSINT, pVMRC);
+ GEN_CHECK_OFF(PDMDRVINSINT, fDetaching);
+ GEN_CHECK_OFF(PDMDRVINSINT, fVMSuspended);
+ GEN_CHECK_OFF(PDMDRVINSINT, fVMReset);
+ GEN_CHECK_OFF(PDMDRVINSINT, pfnAsyncNotify);
+ GEN_CHECK_OFF(PDMDRVINSINT, pCfgHandle);
+ GEN_CHECK_OFF(PDMDRVINS, u32Version);
+ GEN_CHECK_OFF(PDMDRVINS, iInstance);
+ GEN_CHECK_OFF(PDMDRVINS, pHlpRC);
+ GEN_CHECK_OFF(PDMDRVINS, pvInstanceDataRC);
+ GEN_CHECK_OFF(PDMDRVINS, pHlpR0);
+ GEN_CHECK_OFF(PDMDRVINS, pvInstanceDataR0);
+ GEN_CHECK_OFF(PDMDRVINS, pHlpR3);
+ GEN_CHECK_OFF(PDMDRVINS, pvInstanceDataR3);
+ GEN_CHECK_OFF(PDMDRVINS, pReg);
+ GEN_CHECK_OFF(PDMDRVINS, pCfg);
+ GEN_CHECK_OFF(PDMDRVINS, IBase);
+ GEN_CHECK_OFF(PDMDRVINS, Internal);
+ GEN_CHECK_OFF(PDMDRVINS, achInstanceData);
+
+ GEN_CHECK_SIZE(PDMCRITSECTINT);
+ GEN_CHECK_OFF(PDMCRITSECTINT, Core);
+ GEN_CHECK_OFF(PDMCRITSECTINT, pNext);
+ GEN_CHECK_OFF(PDMCRITSECTINT, pvKey);
+ GEN_CHECK_OFF(PDMCRITSECTINT, pVMR3);
+ GEN_CHECK_OFF(PDMCRITSECTINT, pVMR0);
+ GEN_CHECK_OFF(PDMCRITSECTINT, pVMRC);
+ GEN_CHECK_OFF(PDMCRITSECTINT, StatContentionRZLock);
+ GEN_CHECK_OFF(PDMCRITSECTINT, StatContentionRZUnlock);
+ GEN_CHECK_OFF(PDMCRITSECTINT, StatContentionR3);
+ GEN_CHECK_OFF(PDMCRITSECTINT, StatLocked);
+ GEN_CHECK_SIZE(PDMCRITSECT);
+ GEN_CHECK_SIZE(PDMCRITSECTRWINT);
+ GEN_CHECK_OFF(PDMCRITSECTRWINT, Core);
+ GEN_CHECK_OFF(PDMCRITSECTRWINT, pNext);
+ GEN_CHECK_OFF(PDMCRITSECTRWINT, pvKey);
+ GEN_CHECK_OFF(PDMCRITSECTRWINT, pVMR3);
+ GEN_CHECK_OFF(PDMCRITSECTRWINT, pVMR0);
+ GEN_CHECK_OFF(PDMCRITSECTRWINT, pVMRC);
+ GEN_CHECK_OFF(PDMCRITSECTRWINT, pszName);
+ GEN_CHECK_OFF(PDMCRITSECTRWINT, StatContentionRZEnterExcl);
+ GEN_CHECK_OFF(PDMCRITSECTRWINT, StatWriteLocked);
+ GEN_CHECK_SIZE(PDMCRITSECTRW);
+ GEN_CHECK_SIZE(PDMQUEUE);
+ GEN_CHECK_OFF(PDMQUEUE, pNext);
+ GEN_CHECK_OFF(PDMQUEUE, enmType);
+ GEN_CHECK_OFF(PDMQUEUE, u);
+ GEN_CHECK_OFF_DOT(PDMQUEUE, u.Dev.pfnCallback);
+ GEN_CHECK_OFF_DOT(PDMQUEUE, u.Dev.pDevIns);
+ GEN_CHECK_OFF_DOT(PDMQUEUE, u.Drv.pfnCallback);
+ GEN_CHECK_OFF_DOT(PDMQUEUE, u.Drv.pDrvIns);
+ GEN_CHECK_OFF_DOT(PDMQUEUE, u.Int.pfnCallback);
+ GEN_CHECK_OFF_DOT(PDMQUEUE, u.Ext.pfnCallback);
+ GEN_CHECK_OFF_DOT(PDMQUEUE, u.Ext.pvUser);
+ GEN_CHECK_OFF(PDMQUEUE, pVMR3);
+ GEN_CHECK_OFF(PDMQUEUE, pVMR0);
+ GEN_CHECK_OFF(PDMQUEUE, pVMRC);
+ GEN_CHECK_OFF(PDMQUEUE, cMilliesInterval);
+ GEN_CHECK_OFF(PDMQUEUE, pTimer);
+ GEN_CHECK_OFF(PDMQUEUE, cbItem);
+ GEN_CHECK_OFF(PDMQUEUE, cItems);
+ GEN_CHECK_OFF(PDMQUEUE, pPendingR3);
+ GEN_CHECK_OFF(PDMQUEUE, pPendingR0);
+ GEN_CHECK_OFF(PDMQUEUE, pPendingRC);
+ GEN_CHECK_OFF(PDMQUEUE, iFreeHead);
+ GEN_CHECK_OFF(PDMQUEUE, iFreeTail);
+ GEN_CHECK_OFF(PDMQUEUE, pszName);
+ GEN_CHECK_OFF(PDMQUEUE, StatAllocFailures);
+ GEN_CHECK_OFF(PDMQUEUE, StatInsert);
+ GEN_CHECK_OFF(PDMQUEUE, StatFlush);
+ GEN_CHECK_OFF(PDMQUEUE, StatFlushLeftovers);
+ GEN_CHECK_OFF(PDMQUEUE, aFreeItems);
+ GEN_CHECK_OFF(PDMQUEUE, aFreeItems[1]);
+ GEN_CHECK_OFF_DOT(PDMQUEUE, aFreeItems[0].pItemR3);
+ GEN_CHECK_OFF_DOT(PDMQUEUE, aFreeItems[0].pItemR0);
+ GEN_CHECK_OFF_DOT(PDMQUEUE, aFreeItems[1].pItemRC);
+ GEN_CHECK_SIZE(PDMDEVHLPTASK);
+ GEN_CHECK_OFF(PDMDEVHLPTASK, Core);
+ GEN_CHECK_OFF(PDMDEVHLPTASK, pDevInsR3);
+ GEN_CHECK_OFF(PDMDEVHLPTASK, enmOp);
+ GEN_CHECK_OFF(PDMDEVHLPTASK, u);
+ GEN_CHECK_OFF_DOT(PDMDEVHLPTASK, u.IsaSetIRQ.iIrq);
+ GEN_CHECK_OFF_DOT(PDMDEVHLPTASK, u.IsaSetIRQ.iLevel);
+ GEN_CHECK_OFF_DOT(PDMDEVHLPTASK, u.IsaSetIRQ.uTagSrc);
+ GEN_CHECK_OFF_DOT(PDMDEVHLPTASK, u.IoApicSetIRQ.iIrq);
+ GEN_CHECK_OFF_DOT(PDMDEVHLPTASK, u.IoApicSetIRQ.iLevel);
+ GEN_CHECK_OFF_DOT(PDMDEVHLPTASK, u.IoApicSetIRQ.uTagSrc);
+ GEN_CHECK_OFF_DOT(PDMDEVHLPTASK, u.PciSetIRQ.pPciDevR3);
+ GEN_CHECK_OFF_DOT(PDMDEVHLPTASK, u.PciSetIRQ.iIrq);
+ GEN_CHECK_OFF_DOT(PDMDEVHLPTASK, u.PciSetIRQ.iLevel);
+ GEN_CHECK_OFF_DOT(PDMDEVHLPTASK, u.PciSetIRQ.uTagSrc);
+
+ GEN_CHECK_SIZE(PGM);
+ GEN_CHECK_OFF(PGM, offVM);
+ GEN_CHECK_OFF(PGM, fRamPreAlloc);
+ GEN_CHECK_OFF(PGM, paDynPageMap32BitPTEsGC);
+ GEN_CHECK_OFF(PGM, paDynPageMapPaePTEsGC);
+ GEN_CHECK_OFF(PGM, enmHostMode);
+ GEN_CHECK_OFF(PGMCPU, offVM);
+ GEN_CHECK_OFF(PGMCPU, offVCpu);
+ GEN_CHECK_OFF(PGMCPU, offPGM);
+#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE) || defined(VBOX_WITH_RAW_MODE)
+ GEN_CHECK_OFF(PGMCPU, AutoSet);
+#endif
+ GEN_CHECK_OFF(PGMCPU, GCPhysA20Mask);
+ GEN_CHECK_OFF(PGMCPU, fA20Enabled);
+ GEN_CHECK_OFF(PGMCPU, fSyncFlags);
+ GEN_CHECK_OFF(PGMCPU, enmShadowMode);
+ GEN_CHECK_OFF(PGMCPU, enmGuestMode);
+ GEN_CHECK_OFF(PGMCPU, GCPhysCR3);
+ GEN_CHECK_OFF(PGM, GCPtrCR3Mapping);
+ GEN_CHECK_OFF(PGMCPU, pGst32BitPdR3);
+#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+ GEN_CHECK_OFF(PGMCPU, pGst32BitPdR0);
+#endif
+ GEN_CHECK_OFF(PGMCPU, pGst32BitPdRC);
+ GEN_CHECK_OFF(PGMCPU, pGstPaePdptR3);
+#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+ GEN_CHECK_OFF(PGMCPU, pGstPaePdptR0);
+#endif
+ GEN_CHECK_OFF(PGMCPU, pGstPaePdptRC);
+ GEN_CHECK_OFF(PGMCPU, apGstPaePDsR3);
+#ifndef VBOX_WITH_2X_4GB_ADDR_SPACE
+ GEN_CHECK_OFF(PGMCPU, apGstPaePDsR0);
+#endif
+ GEN_CHECK_OFF(PGMCPU, apGstPaePDsRC);
+ GEN_CHECK_OFF(PGMCPU, aGCPhysGstPaePDs);
+ GEN_CHECK_OFF(PGMCPU, aGCPhysGstPaePDsMonitored);
+ GEN_CHECK_OFF(PGMCPU, pShwPageCR3R3);
+ GEN_CHECK_OFF(PGMCPU, pShwPageCR3R0);
+ GEN_CHECK_OFF(PGMCPU, pShwPageCR3RC);
+ GEN_CHECK_OFF(PGMCPU, DisState);
+ GEN_CHECK_OFF(PGMCPU, cGuestModeChanges);
+#ifdef VBOX_WITH_STATISTICS
+ GEN_CHECK_OFF(PGMCPU, pStatsR0);
+ GEN_CHECK_OFF(PGMCPU, pStatTrap0eAttributionR0);
+ GEN_CHECK_OFF(PGMCPU, pStatsRC);
+ GEN_CHECK_OFF(PGMCPU, pStatTrap0eAttributionRC);
+ GEN_CHECK_OFF(PGMCPU, pStatsR3);
+#endif
+ GEN_CHECK_OFF(PGM, offVM);
+ GEN_CHECK_OFF(PGM, offVCpuPGM);
+ GEN_CHECK_OFF(PGM, fRamPreAlloc);
+ GEN_CHECK_OFF(PGM, paDynPageMap32BitPTEsGC);
+ GEN_CHECK_OFF(PGM, paDynPageMapPaePTEsGC);
+ GEN_CHECK_OFF(PGM, enmHostMode);
+ GEN_CHECK_OFF(PGM, fRestoreRomPagesOnReset);
+ GEN_CHECK_OFF(PGM, fZeroRamPagesOnReset);
+ GEN_CHECK_OFF(PGM, GCPhys4MBPSEMask);
+ GEN_CHECK_OFF(PGM, pRamRangesXR3);
+ GEN_CHECK_OFF(PGM, pRamRangesXR0);
+ GEN_CHECK_OFF(PGM, pRamRangesXRC);
+ GEN_CHECK_OFF(PGM, pRomRangesR3);
+ GEN_CHECK_OFF(PGM, pRomRangesR0);
+ GEN_CHECK_OFF(PGM, pRomRangesRC);
+ GEN_CHECK_OFF(PGM, pTreesR3);
+ GEN_CHECK_OFF(PGM, pTreesR0);
+ GEN_CHECK_OFF(PGM, pTreesRC);
+ GEN_CHECK_OFF(PGM, pMappingsR3);
+ GEN_CHECK_OFF(PGM, pMappingsRC);
+ GEN_CHECK_OFF(PGM, pMappingsR0);
+ GEN_CHECK_OFF(PGM, fFinalizedMappings);
+ GEN_CHECK_OFF(PGM, fMappingsFixed);
+ GEN_CHECK_OFF(PGM, fMappingsFixedRestored);
+ GEN_CHECK_OFF(PGM, GCPtrMappingFixed);
+ GEN_CHECK_OFF(PGM, cbMappingFixed);
+ GEN_CHECK_OFF(PGM, pInterPD);
+ GEN_CHECK_OFF(PGM, apInterPTs);
+ GEN_CHECK_OFF(PGM, apInterPaePTs);
+ GEN_CHECK_OFF(PGM, apInterPaePDs);
+ GEN_CHECK_OFF(PGM, pInterPaePDPT);
+ GEN_CHECK_OFF(PGM, pInterPaePDPT64);
+ GEN_CHECK_OFF(PGM, pInterPaePML4);
+ GEN_CHECK_OFF(PGM, HCPhysInterPD);
+ GEN_CHECK_OFF(PGM, HCPhysInterPaePDPT);
+ GEN_CHECK_OFF(PGM, HCPhysInterPaePML4);
+ GEN_CHECK_OFF(PGM, pbDynPageMapBaseGC);
+ GEN_CHECK_OFF(PGM, pRCDynMap);
+ GEN_CHECK_OFF(PGM, pvR0DynMapUsed);
+ GEN_CHECK_OFF(PGM, GCPhys4MBPSEMask);
+ GEN_CHECK_OFF(PGMCPU, GCPhysA20Mask);
+ GEN_CHECK_OFF(PGMCPU, fA20Enabled);
+ GEN_CHECK_OFF(PGMCPU, fSyncFlags);
+ GEN_CHECK_OFF(PGM, CritSectX);
+ GEN_CHECK_OFF(PGM, pPoolR3);
+ GEN_CHECK_OFF(PGM, pPoolR0);
+ GEN_CHECK_OFF(PGM, pPoolRC);
+ GEN_CHECK_OFF(PGM, fNoMorePhysWrites);
+ GEN_CHECK_OFF(PGM, ChunkR3Map);
+ GEN_CHECK_OFF_DOT(PGM, ChunkR3Map.pTree);
+ GEN_CHECK_OFF_DOT(PGM, ChunkR3Map.Tlb);
+ GEN_CHECK_OFF_DOT(PGM, ChunkR3Map.c);
+ GEN_CHECK_OFF_DOT(PGM, ChunkR3Map.cMax);
+ GEN_CHECK_OFF_DOT(PGM, ChunkR3Map.iNow);
+ GEN_CHECK_OFF(PGM, PhysTlbHC);
+ GEN_CHECK_OFF_DOT(PGM, PhysTlbHC.aEntries[0]);
+ GEN_CHECK_OFF_DOT(PGM, PhysTlbHC.aEntries[1]);
+ GEN_CHECK_OFF_DOT(PGM, PhysTlbHC.aEntries[1].GCPhys);
+ GEN_CHECK_OFF_DOT(PGM, PhysTlbHC.aEntries[1].pMap);
+ GEN_CHECK_OFF_DOT(PGM, PhysTlbHC.aEntries[1].pPage);
+ GEN_CHECK_OFF_DOT(PGM, PhysTlbHC.aEntries[1].pv);
+ GEN_CHECK_OFF(PGM, HCPhysZeroPg);
+ GEN_CHECK_OFF(PGM, pvZeroPgR3);
+ GEN_CHECK_OFF(PGM, pvZeroPgR0);
+ GEN_CHECK_OFF(PGM, pvZeroPgRC);
+ GEN_CHECK_OFF(PGM, cHandyPages);
+ GEN_CHECK_OFF(PGM, aHandyPages);
+ GEN_CHECK_OFF_DOT(PGM, aHandyPages[1]);
+ GEN_CHECK_OFF_DOT(PGM, aHandyPages[1].HCPhysGCPhys);
+ GEN_CHECK_OFF_DOT(PGM, aHandyPages[1].idPage);
+ GEN_CHECK_OFF_DOT(PGM, aHandyPages[1].idSharedPage);
+ GEN_CHECK_OFF(PGM, cAllPages);
+ GEN_CHECK_OFF(PGM, cPrivatePages);
+ GEN_CHECK_OFF(PGM, cSharedPages);
+ GEN_CHECK_OFF(PGM, cZeroPages);
+ GEN_CHECK_OFF(PGM, cPureMmioPages);
+ GEN_CHECK_OFF(PGM, cMonitoredPages);
+ GEN_CHECK_OFF(PGM, cWrittenToPages);
+ GEN_CHECK_OFF(PGM, cWriteLockedPages);
+ GEN_CHECK_OFF(PGM, cReadLockedPages);
+ GEN_CHECK_OFF(PGM, cRelocations);
+#ifdef VBOX_WITH_STATISTICS
+ GEN_CHECK_OFF(PGMCPU, pStatsR0);
+ GEN_CHECK_OFF(PGMCPU, pStatsRC);
+ GEN_CHECK_OFF(PGMCPU, pStatsR3);
+#endif
+
+ GEN_CHECK_SIZE(PGMMAPPING);
+ GEN_CHECK_OFF(PGMMAPPING, pNextR3);
+ GEN_CHECK_OFF(PGMMAPPING, pNextRC);
+ GEN_CHECK_OFF(PGMMAPPING, pNextR0);
+ GEN_CHECK_OFF(PGMMAPPING, GCPtr);
+ GEN_CHECK_OFF(PGMMAPPING, GCPtrLast);
+ GEN_CHECK_OFF(PGMMAPPING, cb);
+ GEN_CHECK_OFF(PGMMAPPING, pfnRelocate);
+ GEN_CHECK_OFF(PGMMAPPING, pvUser);
+ GEN_CHECK_OFF(PGMMAPPING, pszDesc);
+ GEN_CHECK_OFF(PGMMAPPING, cPTs);
+ GEN_CHECK_OFF_DOT(PGMMAPPING, aPTs[1].HCPhysPT);
+ GEN_CHECK_OFF_DOT(PGMMAPPING, aPTs[1].pPTR3);
+ GEN_CHECK_OFF_DOT(PGMMAPPING, aPTs[1].pPTR0);
+ GEN_CHECK_OFF_DOT(PGMMAPPING, aPTs[1].pPTRC);
+ GEN_CHECK_OFF_DOT(PGMMAPPING, aPTs[1].HCPhysPaePT0);
+ GEN_CHECK_OFF_DOT(PGMMAPPING, aPTs[1].HCPhysPaePT1);
+ GEN_CHECK_OFF_DOT(PGMMAPPING, aPTs[1].paPaePTsR3);
+ GEN_CHECK_OFF_DOT(PGMMAPPING, aPTs[1].paPaePTsRC);
+ GEN_CHECK_OFF_DOT(PGMMAPPING, aPTs[1].paPaePTsR0);
+ GEN_CHECK_SIZE(PGMPHYSHANDLER);
+ GEN_CHECK_OFF(PGMPHYSHANDLER, Core);
+ GEN_CHECK_SIZE(((PPGMPHYSHANDLER)0)->Core);
+ GEN_CHECK_OFF(PGMPHYSHANDLER, cPages);
+ GEN_CHECK_OFF(PGMPHYSHANDLER, cAliasedPages);
+ GEN_CHECK_OFF(PGMPHYSHANDLER, cTmpOffPages);
+ GEN_CHECK_OFF(PGMPHYSHANDLER, hType);
+ GEN_CHECK_OFF(PGMPHYSHANDLER, pvUserR3);
+ GEN_CHECK_OFF(PGMPHYSHANDLER, pvUserR0);
+ GEN_CHECK_OFF(PGMPHYSHANDLER, pvUserRC);
+ //GEN_CHECK_OFF(PGMPHYSHANDLER, pszDesc);
+ GEN_CHECK_SIZE(PGMPHYSHANDLERTYPEINT);
+ GEN_CHECK_OFF(PGMPHYSHANDLERTYPEINT, u32Magic);
+ GEN_CHECK_OFF(PGMPHYSHANDLERTYPEINT, cRefs);
+ GEN_CHECK_OFF(PGMPHYSHANDLERTYPEINT, ListNode);
+ GEN_CHECK_OFF(PGMPHYSHANDLERTYPEINT, enmKind);
+ GEN_CHECK_OFF(PGMPHYSHANDLERTYPEINT, uState);
+ GEN_CHECK_OFF(PGMPHYSHANDLERTYPEINT, pfnHandlerR3);
+ GEN_CHECK_OFF(PGMPHYSHANDLERTYPEINT, pfnHandlerR0);
+ GEN_CHECK_OFF(PGMPHYSHANDLERTYPEINT, pfnPfHandlerR0);
+ GEN_CHECK_OFF(PGMPHYSHANDLERTYPEINT, pfnHandlerRC);
+ GEN_CHECK_OFF(PGMPHYSHANDLERTYPEINT, pfnPfHandlerRC);
+ GEN_CHECK_OFF(PGMPHYSHANDLERTYPEINT, pszDesc);
+ GEN_CHECK_SIZE(PGMPHYS2VIRTHANDLER);
+ GEN_CHECK_OFF(PGMPHYS2VIRTHANDLER, Core);
+ GEN_CHECK_OFF(PGMPHYS2VIRTHANDLER, offVirtHandler);
+ GEN_CHECK_SIZE(PGMVIRTHANDLER);
+ GEN_CHECK_OFF(PGMVIRTHANDLER, Core);
+ GEN_CHECK_OFF(PGMVIRTHANDLER, hType);
+ GEN_CHECK_OFF(PGMVIRTHANDLER, cb);
+ GEN_CHECK_OFF(PGMVIRTHANDLER, cPages);
+ GEN_CHECK_OFF(PGMVIRTHANDLER, pszDesc);
+ GEN_CHECK_OFF(PGMVIRTHANDLER, aPhysToVirt);
+ GEN_CHECK_SIZE(PGMVIRTHANDLERTYPEINT);
+ GEN_CHECK_OFF(PGMVIRTHANDLERTYPEINT, u32Magic);
+ GEN_CHECK_OFF(PGMVIRTHANDLERTYPEINT, cRefs);
+ GEN_CHECK_OFF(PGMVIRTHANDLERTYPEINT, ListNode);
+ GEN_CHECK_OFF(PGMVIRTHANDLERTYPEINT, enmKind);
+ GEN_CHECK_OFF(PGMVIRTHANDLERTYPEINT, uState);
+ GEN_CHECK_OFF(PGMVIRTHANDLERTYPEINT, fRelocUserRC);
+ GEN_CHECK_OFF(PGMVIRTHANDLERTYPEINT, pfnHandlerRC);
+ GEN_CHECK_OFF(PGMVIRTHANDLERTYPEINT, pfnPfHandlerRC);
+ GEN_CHECK_OFF(PGMVIRTHANDLERTYPEINT, pfnInvalidateR3);
+ GEN_CHECK_OFF(PGMVIRTHANDLERTYPEINT, pfnHandlerR3);
+ GEN_CHECK_OFF(PGMVIRTHANDLERTYPEINT, pszDesc);
+ GEN_CHECK_SIZE(PGMPAGE);
+ GEN_CHECK_OFF_DOT(PGMPAGE, s.cReadLocksY);
+ GEN_CHECK_OFF_DOT(PGMPAGE, s.cWriteLocksY);
+ GEN_CHECK_OFF_DOT(PGMPAGE, s.u16TrackingY);
+ GEN_CHECK_SIZE(PGMRAMRANGE);
+ GEN_CHECK_OFF(PGMRAMRANGE, pNextR3);
+ GEN_CHECK_OFF(PGMRAMRANGE, pNextR0);
+ GEN_CHECK_OFF(PGMRAMRANGE, pNextRC);
+ GEN_CHECK_OFF(PGMRAMRANGE, GCPhys);
+ GEN_CHECK_OFF(PGMRAMRANGE, GCPhysLast);
+ GEN_CHECK_OFF(PGMRAMRANGE, cb);
+ GEN_CHECK_OFF(PGMRAMRANGE, fFlags);
+ GEN_CHECK_OFF(PGMRAMRANGE, pvR3);
+ GEN_CHECK_OFF(PGMRAMRANGE, pszDesc);
+ GEN_CHECK_OFF(PGMRAMRANGE, aPages);
+ GEN_CHECK_OFF(PGMRAMRANGE, aPages[1]);
+ GEN_CHECK_SIZE(PGMROMPAGE);
+ GEN_CHECK_OFF(PGMROMPAGE, Virgin);
+ GEN_CHECK_OFF(PGMROMPAGE, Shadow);
+ GEN_CHECK_OFF(PGMROMPAGE, enmProt);
+ GEN_CHECK_SIZE(PGMROMRANGE);
+ GEN_CHECK_OFF(PGMROMRANGE, pNextR3);
+ GEN_CHECK_OFF(PGMROMRANGE, pNextR0);
+ GEN_CHECK_OFF(PGMROMRANGE, pNextRC);
+ GEN_CHECK_OFF(PGMROMRANGE, GCPhys);
+ GEN_CHECK_OFF(PGMROMRANGE, GCPhysLast);
+ GEN_CHECK_OFF(PGMROMRANGE, cb);
+ GEN_CHECK_OFF(PGMROMRANGE, fFlags);
+ GEN_CHECK_OFF(PGMROMRANGE, cbOriginal);
+ GEN_CHECK_OFF(PGMROMRANGE, pvOriginal);
+ GEN_CHECK_OFF(PGMROMRANGE, pszDesc);
+ GEN_CHECK_OFF(PGMROMRANGE, aPages);
+ GEN_CHECK_OFF(PGMROMRANGE, aPages[1]);
+ GEN_CHECK_SIZE(PGMREGMMIORANGE);
+ GEN_CHECK_OFF(PGMREGMMIORANGE, pDevInsR3);
+ GEN_CHECK_OFF(PGMREGMMIORANGE, pNextR3);
+ GEN_CHECK_OFF(PGMREGMMIORANGE, fFlags);
+ GEN_CHECK_OFF(PGMREGMMIORANGE, iRegion);
+ GEN_CHECK_OFF(PGMREGMMIORANGE, pPhysHandlerR3);
+ GEN_CHECK_OFF(PGMREGMMIORANGE, RamRange);
+ GEN_CHECK_SIZE(PGMTREES);
+ GEN_CHECK_OFF(PGMTREES, PhysHandlers);
+ GEN_CHECK_OFF(PGMTREES, HeadPhysHandlerTypes);
+#ifdef VBOX_WITH_RAW_MODE
+ GEN_CHECK_OFF(PGMTREES, VirtHandlers);
+ GEN_CHECK_OFF(PGMTREES, PhysToVirtHandlers);
+ GEN_CHECK_OFF(PGMTREES, HyperVirtHandlers);
+ GEN_CHECK_OFF(PGMTREES, HeadVirtHandlerTypes);
+#endif
+ GEN_CHECK_SIZE(PGMPOOLPAGE);
+ GEN_CHECK_OFF(PGMPOOLPAGE, Core);
+ GEN_CHECK_OFF(PGMPOOLPAGE, GCPhys);
+ GEN_CHECK_OFF(PGMPOOLPAGE, pvPageR3);
+ GEN_CHECK_OFF(PGMPOOLPAGE, enmKind);
+ GEN_CHECK_OFF(PGMPOOLPAGE, enmAccess);
+ //GEN_CHECK_OFF(PGMPOOLPAGE, fA20Enabled);
+ //GEN_CHECK_OFF(PGMPOOLPAGE, fSeenNonGlobal);
+ //GEN_CHECK_OFF(PGMPOOLPAGE, fMonitored);
+ //GEN_CHECK_OFF(PGMPOOLPAGE, fCached);
+ //GEN_CHECK_OFF(PGMPOOLPAGE, fReusedFlushPending);
+ GEN_CHECK_OFF(PGMPOOLPAGE, idx);
+ GEN_CHECK_OFF(PGMPOOLPAGE, iNext);
+ GEN_CHECK_OFF(PGMPOOLPAGE, iUserHead);
+ GEN_CHECK_OFF(PGMPOOLPAGE, cPresent);
+ GEN_CHECK_OFF(PGMPOOLPAGE, iFirstPresent);
+ GEN_CHECK_OFF(PGMPOOLPAGE, cModifications);
+ GEN_CHECK_OFF(PGMPOOLPAGE, iModifiedNext);
+ GEN_CHECK_OFF(PGMPOOLPAGE, iModifiedPrev);
+ GEN_CHECK_OFF(PGMPOOLPAGE, iMonitoredNext);
+ GEN_CHECK_OFF(PGMPOOLPAGE, iMonitoredPrev);
+ GEN_CHECK_OFF(PGMPOOLPAGE, iAgeNext);
+ GEN_CHECK_OFF(PGMPOOLPAGE, iAgePrev);
+ GEN_CHECK_OFF(PGMPOOLPAGE, idxDirtyEntry);
+ GEN_CHECK_OFF(PGMPOOLPAGE, GCPtrLastAccessHandlerRip);
+ GEN_CHECK_OFF(PGMPOOLPAGE, GCPtrLastAccessHandlerFault);
+ GEN_CHECK_OFF(PGMPOOLPAGE, cLastAccessHandler);
+ GEN_CHECK_OFF(PGMPOOLPAGE, cLocked);
+#ifdef VBOX_STRICT
+ GEN_CHECK_OFF(PGMPOOLPAGE, GCPtrDirtyFault);
+#endif
+ GEN_CHECK_SIZE(PGMPOOL);
+ GEN_CHECK_OFF(PGMPOOL, pVMR3);
+ GEN_CHECK_OFF(PGMPOOL, pVMR0);
+ GEN_CHECK_OFF(PGMPOOL, pVMRC);
+ GEN_CHECK_OFF(PGMPOOL, cMaxPages);
+ GEN_CHECK_OFF(PGMPOOL, cCurPages);
+ GEN_CHECK_OFF(PGMPOOL, iFreeHead);
+ GEN_CHECK_OFF(PGMPOOL, u16Padding);
+#ifdef PGMPOOL_WITH_USER_TRACKING
+ GEN_CHECK_OFF(PGMPOOL, iUserFreeHead);
+ GEN_CHECK_OFF(PGMPOOL, cMaxUsers);
+ GEN_CHECK_OFF(PGMPOOL, cPresent);
+ GEN_CHECK_OFF(PGMPOOL, paUsersR3);
+ GEN_CHECK_OFF(PGMPOOL, paUsersR0);
+ GEN_CHECK_OFF(PGMPOOL, paUsersRC);
+#endif /* PGMPOOL_WITH_USER_TRACKING */
+#ifdef PGMPOOL_WITH_GCPHYS_TRACKING
+ GEN_CHECK_OFF(PGMPOOL, iPhysExtFreeHead);
+ GEN_CHECK_OFF(PGMPOOL, cMaxPhysExts);
+ GEN_CHECK_OFF(PGMPOOL, paPhysExtsR3);
+ GEN_CHECK_OFF(PGMPOOL, paPhysExtsR0);
+ GEN_CHECK_OFF(PGMPOOL, paPhysExtsRC);
+#endif
+#ifdef PGMPOOL_WITH_CACHE
+ GEN_CHECK_OFF(PGMPOOL, aiHash);
+ GEN_CHECK_OFF(PGMPOOL, iAgeHead);
+ GEN_CHECK_OFF(PGMPOOL, iAgeTail);
+ GEN_CHECK_OFF(PGMPOOL, fCacheEnabled);
+#endif
+#ifdef PGMPOOL_WITH_MONITORING
+ GEN_CHECK_OFF(PGMPOOL, pfnAccessHandlerRC);
+ GEN_CHECK_OFF(PGMPOOL, pfnAccessHandlerR0);
+ GEN_CHECK_OFF(PGMPOOL, pfnAccessHandlerR3);
+ GEN_CHECK_OFF(PGMPOOL, pszAccessHandler);
+ GEN_CHECK_OFF(PGMPOOL, iModifiedHead);
+ GEN_CHECK_OFF(PGMPOOL, cModifiedPages);
+#endif
+ GEN_CHECK_OFF(PGMPOOL, cUsedPages);
+#ifdef VBOX_WITH_STATISTICS
+ GEN_CHECK_OFF(PGMPOOL, cUsedPagesHigh);
+ GEN_CHECK_OFF(PGMPOOL, StatAlloc);
+ GEN_CHECK_OFF(PGMPOOL, StatClearAll);
+#endif
+ GEN_CHECK_OFF(PGMPOOL, HCPhysTree);
+ GEN_CHECK_OFF(PGMPOOL, aPages);
+ GEN_CHECK_OFF(PGMPOOL, aPages[1]);
+ GEN_CHECK_OFF(PGMPOOL, aPages[PGMPOOL_IDX_FIRST - 1]);
+ GEN_CHECK_SIZE(PGMRCDYNMAP);
+ GEN_CHECK_OFF(PGMRCDYNMAP, u32Magic);
+ GEN_CHECK_OFF(PGMRCDYNMAP, paPages);
+ GEN_CHECK_OFF(PGMRCDYNMAP, cPages);
+ GEN_CHECK_OFF(PGMRCDYNMAP, cLoad);
+ GEN_CHECK_OFF(PGMRCDYNMAP, cMaxLoad);
+ GEN_CHECK_OFF(PGMRCDYNMAP, cGuardPages);
+ GEN_CHECK_OFF(PGMRCDYNMAP, cUsers);
+ GEN_CHECK_SIZE(PGMRCDYNMAPENTRY);
+ GEN_CHECK_OFF(PGMRCDYNMAPENTRY, HCPhys);
+ GEN_CHECK_OFF(PGMRCDYNMAPENTRY, pvPage);
+ GEN_CHECK_OFF(PGMRCDYNMAPENTRY, cRefs);
+ GEN_CHECK_OFF_DOT(PGMRCDYNMAPENTRY, uPte.pLegacy);
+ GEN_CHECK_OFF_DOT(PGMRCDYNMAPENTRY, uPte.pPae);
+ GEN_CHECK_OFF(PGMMAPSETENTRY, pvPage);
+ GEN_CHECK_OFF(PGMMAPSETENTRY, iPage);
+ GEN_CHECK_OFF(PGMMAPSETENTRY, cRefs);
+ GEN_CHECK_OFF(PGMMAPSETENTRY, cInlinedRefs);
+ GEN_CHECK_OFF(PGMMAPSETENTRY, cUnrefs);
+ GEN_CHECK_OFF(PGMMAPSETENTRY, HCPhys);
+
+ GEN_CHECK_SIZE(REM);
+ GEN_CHECK_OFF(REM, pCtx);
+ GEN_CHECK_OFF(REM, cCanExecuteRaw);
+ GEN_CHECK_OFF(REM, aGCPtrInvalidatedPages);
+ GEN_CHECK_OFF(REM, idxPendingList);
+ GEN_CHECK_OFF(REM, aHandlerNotifications);
+ GEN_CHECK_OFF(REM, idxFreeList);
+ GEN_CHECK_OFF(REM, CritSectRegister);
+ GEN_CHECK_OFF(REM, rc);
+ GEN_CHECK_OFF(REM, StatsInQEMU);
+ GEN_CHECK_OFF(REM, Env);
+
+ GEN_CHECK_SIZE(REMHANDLERNOTIFICATION);
+ GEN_CHECK_OFF(REMHANDLERNOTIFICATION, enmKind);
+ GEN_CHECK_OFF(REMHANDLERNOTIFICATION, u);
+ GEN_CHECK_OFF_DOT(REMHANDLERNOTIFICATION, u.PhysicalRegister.GCPhys);
+ GEN_CHECK_OFF_DOT(REMHANDLERNOTIFICATION, u.PhysicalRegister.cb);
+ GEN_CHECK_OFF_DOT(REMHANDLERNOTIFICATION, u.PhysicalRegister.enmKind);
+ GEN_CHECK_OFF_DOT(REMHANDLERNOTIFICATION, u.PhysicalRegister.fHasHCHandler);
+ GEN_CHECK_OFF_DOT(REMHANDLERNOTIFICATION, u.PhysicalDeregister.GCPhys);
+ GEN_CHECK_OFF_DOT(REMHANDLERNOTIFICATION, u.PhysicalDeregister.cb);
+ GEN_CHECK_OFF_DOT(REMHANDLERNOTIFICATION, u.PhysicalDeregister.enmKind);
+ GEN_CHECK_OFF_DOT(REMHANDLERNOTIFICATION, u.PhysicalDeregister.fHasHCHandler);
+ GEN_CHECK_OFF_DOT(REMHANDLERNOTIFICATION, u.PhysicalDeregister.fRestoreAsRAM);
+ GEN_CHECK_OFF_DOT(REMHANDLERNOTIFICATION, u.PhysicalModify.GCPhysOld);
+ GEN_CHECK_OFF_DOT(REMHANDLERNOTIFICATION, u.PhysicalModify.GCPhysNew);
+ GEN_CHECK_OFF_DOT(REMHANDLERNOTIFICATION, u.PhysicalModify.cb);
+ GEN_CHECK_OFF_DOT(REMHANDLERNOTIFICATION, u.PhysicalModify.enmKind);
+ GEN_CHECK_OFF_DOT(REMHANDLERNOTIFICATION, u.PhysicalModify.fHasHCHandler);
+ GEN_CHECK_OFF_DOT(REMHANDLERNOTIFICATION, u.PhysicalModify.fRestoreAsRAM);
+ GEN_CHECK_OFF(REMHANDLERNOTIFICATION, idxSelf);
+ GEN_CHECK_OFF(REMHANDLERNOTIFICATION, idxNext);
+
+ GEN_CHECK_SIZE(SELM);
+ GEN_CHECK_OFF(SELM, offVM);
+ GEN_CHECK_OFF(SELM, aHyperSel[SELM_HYPER_SEL_CS]);
+ GEN_CHECK_OFF(SELM, aHyperSel[SELM_HYPER_SEL_DS]);
+ GEN_CHECK_OFF(SELM, aHyperSel[SELM_HYPER_SEL_CS64]);
+ GEN_CHECK_OFF(SELM, aHyperSel[SELM_HYPER_SEL_TSS]);
+ GEN_CHECK_OFF(SELM, aHyperSel[SELM_HYPER_SEL_TSS_TRAP08]);
+ GEN_CHECK_OFF(SELM, hShadowGdtWriteHandlerType);
+ GEN_CHECK_OFF(SELM, hGuestGdtWriteHandlerType);
+ GEN_CHECK_OFF(SELM, paGdtR3);
+ GEN_CHECK_OFF(SELM, paGdtRC);
+ GEN_CHECK_OFF(SELM, GuestGdtr);
+ GEN_CHECK_OFF(SELM, cbEffGuestGdtLimit);
+ GEN_CHECK_OFF(SELM, hShadowLdtWriteHandlerType);
+ GEN_CHECK_OFF(SELM, hGuestLdtWriteHandlerType);
+ GEN_CHECK_OFF(SELM, pvLdtR3);
+ GEN_CHECK_OFF(SELM, pvLdtRC);
+ GEN_CHECK_OFF(SELM, GCPtrGuestLdt);
+ GEN_CHECK_OFF(SELM, cbLdtLimit);
+ GEN_CHECK_OFF(SELM, offLdtHyper);
+ GEN_CHECK_OFF(SELM, Tss);
+ GEN_CHECK_OFF(SELM, TssTrap08);
+ GEN_CHECK_OFF(SELM, hShadowTssWriteHandlerType);
+ GEN_CHECK_OFF(SELM, hGuestTssWriteHandlerType);
+ GEN_CHECK_OFF(SELM, pvMonShwTssRC);
+ GEN_CHECK_OFF(SELM, GCPtrGuestTss);
+ GEN_CHECK_OFF(SELM, cbGuestTss);
+ GEN_CHECK_OFF(SELM, fGuestTss32Bit);
+ GEN_CHECK_OFF(SELM, cbMonitoredGuestTss);
+ GEN_CHECK_OFF(SELM, GCSelTss);
+ GEN_CHECK_OFF(SELM, fGDTRangeRegistered);
+ GEN_CHECK_OFF(SELM, StatUpdateFromCPUM);
+ GEN_CHECK_OFF(SELM, StatStaleToUnstaleSReg);
+ GEN_CHECK_OFF(SELM, StatLoadHidSelGstNoGood);
+
+ GEN_CHECK_SIZE(TM);
+ GEN_CHECK_OFF(TM, offVM);
+ GEN_CHECK_OFF(TM, pvGIPR3);
+ //GEN_CHECK_OFF(TM, pvGIPR0);
+ GEN_CHECK_OFF(TM, pvGIPRC);
+ GEN_CHECK_OFF(TMCPU, fTSCTicking);
+ GEN_CHECK_OFF(TM, enmTSCMode);
+ GEN_CHECK_OFF(TM, fTSCTiedToExecution);
+ GEN_CHECK_OFF(TMCPU, offTSCRawSrc);
+ GEN_CHECK_OFF(TMCPU, u64TSC);
+ GEN_CHECK_OFF(TM, cTSCTicksPerSecond);
+ GEN_CHECK_OFF(TM, cVirtualTicking);
+ GEN_CHECK_OFF(TM, fVirtualWarpDrive);
+ GEN_CHECK_OFF(TM, fVirtualSyncTicking);
+ GEN_CHECK_OFF(TM, fVirtualSyncCatchUp);
+ GEN_CHECK_OFF(TM, u32VirtualWarpDrivePercentage);
+ GEN_CHECK_OFF(TM, u64VirtualOffset);
+ GEN_CHECK_OFF(TM, u64Virtual);
+ GEN_CHECK_OFF(TM, u64VirtualRawPrev);
+ GEN_CHECK_OFF(TM, VirtualGetRawDataR3);
+ GEN_CHECK_OFF_DOT(TM, VirtualGetRawDataR3.pu64Prev);
+ GEN_CHECK_OFF_DOT(TM, VirtualGetRawDataR3.pfnBad);
+ GEN_CHECK_OFF_DOT(TM, VirtualGetRawDataR3.pfnRediscover);
+ GEN_CHECK_OFF_DOT(TM, VirtualGetRawDataR3.c1nsSteps);
+ GEN_CHECK_OFF_DOT(TM, VirtualGetRawDataR3.cBadPrev);
+ GEN_CHECK_OFF_DOT(TM, VirtualGetRawDataR3.cExpired);
+ GEN_CHECK_OFF_DOT(TM, VirtualGetRawDataR3.cUpdateRaces);
+ GEN_CHECK_OFF(TM, VirtualGetRawDataR0);
+ GEN_CHECK_OFF_DOT(TM, VirtualGetRawDataR0.pu64Prev);
+ GEN_CHECK_OFF_DOT(TM, VirtualGetRawDataR0.pfnBad);
+ GEN_CHECK_OFF_DOT(TM, VirtualGetRawDataR0.pfnRediscover);
+ GEN_CHECK_OFF_DOT(TM, VirtualGetRawDataR0.c1nsSteps);
+ GEN_CHECK_OFF_DOT(TM, VirtualGetRawDataR0.cBadPrev);
+ GEN_CHECK_OFF_DOT(TM, VirtualGetRawDataR0.cExpired);
+ GEN_CHECK_OFF_DOT(TM, VirtualGetRawDataR0.cUpdateRaces);
+ GEN_CHECK_OFF(TM, VirtualGetRawDataRC);
+ GEN_CHECK_OFF_DOT(TM, VirtualGetRawDataRC.pu64Prev);
+ GEN_CHECK_OFF_DOT(TM, VirtualGetRawDataRC.pfnBad);
+ GEN_CHECK_OFF_DOT(TM, VirtualGetRawDataRC.pfnRediscover);
+ GEN_CHECK_OFF_DOT(TM, VirtualGetRawDataRC.c1nsSteps);
+ GEN_CHECK_OFF_DOT(TM, VirtualGetRawDataRC.cBadPrev);
+ GEN_CHECK_OFF_DOT(TM, VirtualGetRawDataRC.cExpired);
+ GEN_CHECK_OFF_DOT(TM, VirtualGetRawDataRC.cUpdateRaces);
+ GEN_CHECK_OFF(TM, pfnVirtualGetRawR3);
+ GEN_CHECK_OFF(TM, pfnVirtualGetRawR0);
+ GEN_CHECK_OFF(TM, pfnVirtualGetRawRC);
+ GEN_CHECK_OFF(TM, u64VirtualWarpDriveStart);
+ GEN_CHECK_OFF(TM, u64VirtualSync);
+ GEN_CHECK_OFF(TM, offVirtualSync);
+ GEN_CHECK_OFF(TM, offVirtualSyncGivenUp);
+ GEN_CHECK_OFF(TM, u64VirtualSyncCatchUpPrev);
+ GEN_CHECK_OFF(TM, u32VirtualSyncCatchUpPercentage);
+ GEN_CHECK_OFF(TM, u32VirtualSyncScheduleSlack);
+ GEN_CHECK_OFF(TM, u64VirtualSyncCatchUpStopThreshold);
+ GEN_CHECK_OFF(TM, u64VirtualSyncCatchUpGiveUpThreshold);
+ GEN_CHECK_OFF(TM, aVirtualSyncCatchUpPeriods);
+ GEN_CHECK_OFF_DOT(TM, aVirtualSyncCatchUpPeriods[0].u64Start);
+ GEN_CHECK_OFF_DOT(TM, aVirtualSyncCatchUpPeriods[0].u32Percentage);
+ GEN_CHECK_OFF_DOT(TM, aVirtualSyncCatchUpPeriods[1].u64Start);
+ GEN_CHECK_OFF_DOT(TM, aVirtualSyncCatchUpPeriods[1].u32Percentage);
+ GEN_CHECK_OFF(TM, pTimer);
+ GEN_CHECK_OFF(TM, u32TimerMillies);
+ GEN_CHECK_OFF(TM, pFree);
+ GEN_CHECK_OFF(TM, pCreated);
+ GEN_CHECK_OFF(TM, paTimerQueuesR3);
+ GEN_CHECK_OFF(TM, paTimerQueuesR0);
+ GEN_CHECK_OFF(TM, paTimerQueuesRC);
+ GEN_CHECK_OFF(TM, TimerCritSect);
+ GEN_CHECK_OFF(TM, VirtualSyncLock);
+ GEN_CHECK_OFF(TM, StatDoQueues);
+ GEN_CHECK_OFF(TM, StatTimerCallbackSetFF);
+ GEN_CHECK_SIZE(TMTIMER);
+ GEN_CHECK_OFF(TMTIMER, u64Expire);
+ GEN_CHECK_OFF(TMTIMER, enmClock);
+ GEN_CHECK_OFF(TMTIMER, enmType);
+ GEN_CHECK_OFF_DOT(TMTIMER, u.Dev.pfnTimer);
+ GEN_CHECK_OFF_DOT(TMTIMER, u.Dev.pDevIns);
+ GEN_CHECK_OFF_DOT(TMTIMER, u.Drv.pfnTimer);
+ GEN_CHECK_OFF_DOT(TMTIMER, u.Drv.pDrvIns);
+ GEN_CHECK_OFF_DOT(TMTIMER, u.Internal.pfnTimer);
+ GEN_CHECK_OFF_DOT(TMTIMER, u.External.pfnTimer);
+ GEN_CHECK_OFF(TMTIMER, enmState);
+ GEN_CHECK_OFF(TMTIMER, offScheduleNext);
+ GEN_CHECK_OFF(TMTIMER, offNext);
+ GEN_CHECK_OFF(TMTIMER, offPrev);
+ GEN_CHECK_OFF(TMTIMER, pVMR0);
+ GEN_CHECK_OFF(TMTIMER, pVMR3);
+ GEN_CHECK_OFF(TMTIMER, pVMRC);
+ GEN_CHECK_OFF(TMTIMER, uHzHint);
+ GEN_CHECK_OFF(TMTIMER, pvUser);
+ GEN_CHECK_OFF(TMTIMER, pCritSect);
+ GEN_CHECK_OFF(TMTIMER, pBigNext);
+ GEN_CHECK_OFF(TMTIMER, pBigPrev);
+ GEN_CHECK_OFF(TMTIMER, pszDesc);
+ GEN_CHECK_SIZE(TMTIMERQUEUE);
+ GEN_CHECK_OFF(TMTIMERQUEUE, offActive);
+ GEN_CHECK_OFF(TMTIMERQUEUE, offSchedule);
+ GEN_CHECK_OFF(TMTIMERQUEUE, enmClock);
+
+ GEN_CHECK_SIZE(TRPM);
+ GEN_CHECK_SIZE(TRPMCPU);
+ GEN_CHECK_SIZE(VM); // has .mac
+ GEN_CHECK_SIZE(VMM);
+ GEN_CHECK_OFF(VMM, offVM);
+ GEN_CHECK_OFF(VMM, cbCoreCode);
+ GEN_CHECK_OFF(VMM, HCPhysCoreCode);
+ GEN_CHECK_OFF(VMM, pvCoreCodeR3);
+ GEN_CHECK_OFF(VMM, pvCoreCodeR0);
+ GEN_CHECK_OFF(VMM, pvCoreCodeRC);
+ GEN_CHECK_OFF(VMM, enmSwitcher);
+ GEN_CHECK_OFF(VMM, aoffSwitchers);
+ GEN_CHECK_OFF_DOT(VMM, aoffSwitchers[1]);
+ GEN_CHECK_OFF(VMM, pfnR0ToRawMode);
+ GEN_CHECK_OFF(VMM, pfnRCToHost);
+ GEN_CHECK_OFF(VMM, pfnCallTrampolineRC);
+ GEN_CHECK_OFF(VMM, pfnCPUMRCResumeGuest);
+ GEN_CHECK_OFF(VMM, pfnCPUMRCResumeGuestV86);
+ GEN_CHECK_OFF(VMM, pRCLoggerRC);
+ GEN_CHECK_OFF(VMM, pRCLoggerR3);
+ GEN_CHECK_OFF(VMM, cbRCLogger);
+ GEN_CHECK_OFF(VMM, fRCLoggerFlushingDisabled);
+ GEN_CHECK_OFF(VMM, fStackGuardsStationed);
+ GEN_CHECK_OFF(VMM, fUsePeriodicPreemptionTimers);
+ GEN_CHECK_OFF(VMM, pYieldTimer);
+ GEN_CHECK_OFF(VMM, cYieldResumeMillies);
+ GEN_CHECK_OFF(VMM, cYieldEveryMillies);
+ GEN_CHECK_OFF(VMM, pahEvtRendezvousEnterOrdered);
+ GEN_CHECK_OFF(VMM, hEvtRendezvousEnterOneByOne);
+ GEN_CHECK_OFF(VMM, hEvtMulRendezvousEnterAllAtOnce);
+ GEN_CHECK_OFF(VMM, hEvtMulRendezvousDone);
+ GEN_CHECK_OFF(VMM, hEvtRendezvousDoneCaller);
+ GEN_CHECK_OFF(VMM, pfnRendezvous);
+ GEN_CHECK_OFF(VMM, pvRendezvousUser);
+ GEN_CHECK_OFF(VMM, fRendezvousFlags);
+ GEN_CHECK_OFF(VMM, cRendezvousEmtsEntered);
+ GEN_CHECK_OFF(VMM, cRendezvousEmtsDone);
+ GEN_CHECK_OFF(VMM, cRendezvousEmtsReturned);
+ GEN_CHECK_OFF(VMM, i32RendezvousStatus);
+ GEN_CHECK_OFF(VMM, u32RendezvousLock);
+ GEN_CHECK_OFF(VMM, szRing0AssertMsg1);
+ GEN_CHECK_OFF(VMM, szRing0AssertMsg2);
+ GEN_CHECK_OFF(VMM, StatRunRC);
+ GEN_CHECK_OFF(VMM, StatRZCallPGMLock);
+ GEN_CHECK_OFF(VMMCPU, iLastGZRc);
+ GEN_CHECK_OFF(VMMCPU, pbEMTStackR3);
+ GEN_CHECK_OFF(VMMCPU, pbEMTStackRC);
+ GEN_CHECK_OFF(VMMCPU, pbEMTStackBottomRC);
+#ifdef LOG_ENABLED
+ GEN_CHECK_OFF(VMMCPU, pR0LoggerR0);
+ GEN_CHECK_OFF(VMMCPU, pR0LoggerR3);
+#endif
+ GEN_CHECK_OFF(VMMCPU, cCallRing3Disabled);
+ GEN_CHECK_OFF(VMMCPU, enmCallRing3Operation);
+ GEN_CHECK_OFF(VMMCPU, rcCallRing3);
+ GEN_CHECK_OFF(VMMCPU, u64CallRing3Arg);
+ GEN_CHECK_OFF(VMMCPU, CallRing3JmpBufR0);
+ GEN_CHECK_OFF_DOT(VMMCPU, CallRing3JmpBufR0.SpCheck);
+ GEN_CHECK_OFF_DOT(VMMCPU, CallRing3JmpBufR0.SpResume);
+
+ GEN_CHECK_SIZE(RTPINGPONG);
+ GEN_CHECK_SIZE(RTCRITSECT);
+ GEN_CHECK_OFF(RTCRITSECT, u32Magic);
+ GEN_CHECK_OFF(RTCRITSECT, cLockers);
+ GEN_CHECK_OFF(RTCRITSECT, NativeThreadOwner);
+ GEN_CHECK_OFF(RTCRITSECT, cNestings);
+ GEN_CHECK_OFF(RTCRITSECT, fFlags);
+ GEN_CHECK_OFF(RTCRITSECT, EventSem);
+ GEN_CHECK_OFF(RTCRITSECT, pValidatorRec);
+
+ GEN_CHECK_SIZE(CSAM);
+ GEN_CHECK_OFF(CSAM, offVM);
+ GEN_CHECK_OFF(CSAM, pPageTree);
+ GEN_CHECK_OFF(CSAM, aDangerousInstr);
+ GEN_CHECK_OFF(CSAM, aDangerousInstr[1]);
+ GEN_CHECK_OFF(CSAM, aDangerousInstr[CSAM_MAX_DANGR_INSTR - 1]);
+ GEN_CHECK_OFF(CSAM, cDangerousInstr);
+ GEN_CHECK_OFF(CSAM, iDangerousInstr);
+ GEN_CHECK_OFF(CSAM, pPDBitmapGC);
+ GEN_CHECK_OFF(CSAM, pPDHCBitmapGC);
+ GEN_CHECK_OFF(CSAM, pPDBitmapHC);
+ GEN_CHECK_OFF(CSAM, pPDGCBitmapHC);
+ GEN_CHECK_OFF(CSAM, savedstate);
+ GEN_CHECK_OFF_DOT(CSAM, savedstate.pSSM);
+ GEN_CHECK_OFF_DOT(CSAM, savedstate.cPageRecords);
+ GEN_CHECK_OFF_DOT(CSAM, savedstate.cPatchPageRecords);
+ GEN_CHECK_OFF(CSAM, cDirtyPages);
+ GEN_CHECK_OFF(CSAM, pvDirtyBasePage);
+ GEN_CHECK_OFF_DOT(CSAM, pvDirtyBasePage[1]);
+ GEN_CHECK_OFF_DOT(CSAM, pvDirtyBasePage[CSAM_MAX_DIRTY_PAGES - 1]);
+ GEN_CHECK_OFF(CSAM, pvDirtyFaultPage);
+ GEN_CHECK_OFF_DOT(CSAM, pvDirtyFaultPage[1]);
+ GEN_CHECK_OFF_DOT(CSAM, pvDirtyFaultPage[CSAM_MAX_DIRTY_PAGES - 1]);
+ GEN_CHECK_OFF(CSAM, pvCallInstruction);
+ GEN_CHECK_OFF(CSAM, iCallInstruction);
+ GEN_CHECK_OFF(CSAM, fScanningStarted);
+ GEN_CHECK_OFF(CSAM, fGatesChecked);
+ GEN_CHECK_OFF(CSAM, StatNrTraps);
+ GEN_CHECK_OFF(CSAM, StatNrPages);
+
+ GEN_CHECK_SIZE(PATM);
+ GEN_CHECK_OFF(PATM, offVM);
+ GEN_CHECK_OFF(PATM, pPatchMemGC);
+ GEN_CHECK_OFF(PATM, pPatchMemHC);
+ GEN_CHECK_OFF(PATM, cbPatchMem);
+ GEN_CHECK_OFF(PATM, offPatchMem);
+ GEN_CHECK_OFF(PATM, fOutOfMemory);
+ GEN_CHECK_OFF(PATM, deltaReloc);
+ GEN_CHECK_OFF(PATM, pGCStateGC);
+ GEN_CHECK_OFF(PATM, pGCStateHC);
+ GEN_CHECK_OFF(PATM, pGCStackGC);
+ GEN_CHECK_OFF(PATM, pGCStackHC);
+ GEN_CHECK_OFF(PATM, pCPUMCtxGC);
+ GEN_CHECK_OFF(PATM, pStatsGC);
+ GEN_CHECK_OFF(PATM, pStatsHC);
+ GEN_CHECK_OFF(PATM, uCurrentPatchIdx);
+ GEN_CHECK_OFF(PATM, ulCallDepth);
+ GEN_CHECK_OFF(PATM, cPageRecords);
+ GEN_CHECK_OFF(PATM, pPatchedInstrGCLowest);
+ GEN_CHECK_OFF(PATM, pPatchedInstrGCHighest);
+ GEN_CHECK_OFF(PATM, PatchLookupTreeHC);
+ GEN_CHECK_OFF(PATM, PatchLookupTreeGC);
+ GEN_CHECK_OFF(PATM, pfnHelperCallGC);
+ GEN_CHECK_OFF(PATM, pfnHelperRetGC);
+ GEN_CHECK_OFF(PATM, pfnHelperJumpGC);
+ GEN_CHECK_OFF(PATM, pfnHelperIretGC);
+ GEN_CHECK_OFF(PATM, pGlobalPatchRec);
+ GEN_CHECK_OFF(PATM, pfnSysEnterGC);
+ GEN_CHECK_OFF(PATM, pfnSysEnterPatchGC);
+ GEN_CHECK_OFF(PATM, uSysEnterPatchIdx);
+ GEN_CHECK_OFF(PATM, pvFaultMonitor);
+ GEN_CHECK_OFF(PATM, mmio);
+ GEN_CHECK_OFF_DOT(PATM, mmio.GCPhys);
+ GEN_CHECK_OFF_DOT(PATM, mmio.pCachedData);
+ GEN_CHECK_OFF(PATM, savedstate);
+ GEN_CHECK_OFF_DOT(PATM, savedstate.pSSM);
+ GEN_CHECK_OFF_DOT(PATM, savedstate.cPatches);
+ GEN_CHECK_OFF(PATM, StatNrOpcodeRead);
+ GEN_CHECK_OFF(PATM, StatU32FunctionMaxSlotsUsed);
+
+ GEN_CHECK_SIZE(PATMGCSTATE);
+ GEN_CHECK_OFF(PATMGCSTATE, uVMFlags);
+ GEN_CHECK_OFF(PATMGCSTATE, uPendingAction);
+ GEN_CHECK_OFF(PATMGCSTATE, uPatchCalls);
+ GEN_CHECK_OFF(PATMGCSTATE, uScratch);
+ GEN_CHECK_OFF(PATMGCSTATE, uIretEFlags);
+ GEN_CHECK_OFF(PATMGCSTATE, uIretCS);
+ GEN_CHECK_OFF(PATMGCSTATE, uIretEIP);
+ GEN_CHECK_OFF(PATMGCSTATE, Psp);
+ GEN_CHECK_OFF(PATMGCSTATE, fPIF);
+ GEN_CHECK_OFF(PATMGCSTATE, GCPtrInhibitInterrupts);
+ GEN_CHECK_OFF(PATMGCSTATE, Restore);
+ GEN_CHECK_OFF_DOT(PATMGCSTATE, Restore.uEAX);
+ GEN_CHECK_OFF_DOT(PATMGCSTATE, Restore.uECX);
+ GEN_CHECK_OFF_DOT(PATMGCSTATE, Restore.uEDI);
+ GEN_CHECK_OFF_DOT(PATMGCSTATE, Restore.eFlags);
+ GEN_CHECK_OFF_DOT(PATMGCSTATE, Restore.uFlags);
+ GEN_CHECK_SIZE(PATMTREES);
+ GEN_CHECK_OFF(PATMTREES, PatchTree);
+ GEN_CHECK_OFF(PATMTREES, PatchTreeByPatchAddr);
+ GEN_CHECK_OFF(PATMTREES, PatchTreeByPage);
+ GEN_CHECK_SIZE(PATMPATCHREC);
+ GEN_CHECK_OFF(PATMPATCHREC, Core);
+ GEN_CHECK_OFF(PATMPATCHREC, CoreOffset);
+ GEN_CHECK_OFF(PATMPATCHREC, patch);
+ GEN_CHECK_SIZE(PATCHINFO);
+ GEN_CHECK_OFF(PATCHINFO, uState);
+ GEN_CHECK_OFF(PATCHINFO, uOldState);
+ GEN_CHECK_OFF(PATCHINFO, uOpMode);
+ GEN_CHECK_OFF(PATCHINFO, unusedHC);
+ GEN_CHECK_OFF(PATCHINFO, pPrivInstrGC);
+ GEN_CHECK_OFF(PATCHINFO, aPrivInstr);
+ GEN_CHECK_OFF(PATCHINFO, aPrivInstr[1]);
+ GEN_CHECK_OFF(PATCHINFO, aPrivInstr[MAX_INSTR_SIZE - 1]);
+ GEN_CHECK_OFF(PATCHINFO, cbPrivInstr);
+ GEN_CHECK_OFF(PATCHINFO, opcode);
+ GEN_CHECK_OFF(PATCHINFO, cbPatchJump);
+ GEN_CHECK_OFF(PATCHINFO, pPatchJumpDestGC);
+ GEN_CHECK_OFF(PATCHINFO, pPatchBlockOffset);
+ GEN_CHECK_OFF(PATCHINFO, cbPatchBlockSize);
+ GEN_CHECK_OFF(PATCHINFO, uCurPatchOffset);
+ GEN_CHECK_OFF(PATCHINFO, flags);
+ GEN_CHECK_OFF(PATCHINFO, pInstrGCLowest);
+ GEN_CHECK_OFF(PATCHINFO, pInstrGCHighest);
+ GEN_CHECK_OFF(PATCHINFO, FixupTree);
+ GEN_CHECK_OFF(PATCHINFO, nrFixups);
+ GEN_CHECK_OFF(PATCHINFO, JumpTree);
+ GEN_CHECK_OFF(PATCHINFO, nrJumpRecs);
+ GEN_CHECK_OFF(PATCHINFO, Patch2GuestAddrTree);
+ GEN_CHECK_OFF(PATCHINFO, Guest2PatchAddrTree);
+ GEN_CHECK_OFF(PATCHINFO, nrPatch2GuestRecs);
+ GEN_CHECK_OFF(PATCHINFO, unused);
+ GEN_CHECK_OFF_DOT(PATCHINFO, unused.pPatchLocStartHC);
+ GEN_CHECK_OFF_DOT(PATCHINFO, unused.pPatchLocEndHC);
+ GEN_CHECK_OFF_DOT(PATCHINFO, unused.pGuestLoc);
+ GEN_CHECK_OFF_DOT(PATCHINFO, unused.opsize);
+ GEN_CHECK_OFF(PATCHINFO, pTempInfo);
+ GEN_CHECK_OFF(PATCHINFO, pTrampolinePatchesHead);
+ GEN_CHECK_OFF(PATCHINFO, cCodeWrites);
+ GEN_CHECK_OFF(PATCHINFO, cTraps);
+ GEN_CHECK_OFF(PATCHINFO, cInvalidWrites);
+ GEN_CHECK_OFF(PATCHINFO, uPatchIdx);
+ GEN_CHECK_OFF(PATCHINFO, bDirtyOpcode);
+ GEN_CHECK_SIZE(PATMPATCHPAGE);
+ GEN_CHECK_OFF(PATMPATCHPAGE, Core);
+ GEN_CHECK_OFF(PATMPATCHPAGE, pLowestAddrGC);
+ GEN_CHECK_OFF(PATMPATCHPAGE, pHighestAddrGC);
+ GEN_CHECK_OFF(PATMPATCHPAGE, cCount);
+ GEN_CHECK_OFF(PATMPATCHPAGE, cMaxPatches);
+ GEN_CHECK_OFF(PATMPATCHPAGE, papPatch);
+#endif
+
+ GEN_CHECK_SIZE(APIC);
+ GEN_CHECK_OFF(APIC, pvApicPibR0);
+ GEN_CHECK_OFF(APIC, pvApicPibR3);
+ GEN_CHECK_OFF(APIC, cbApicPib);
+ GEN_CHECK_OFF(APIC, enmMaxMode);
+ GEN_CHECK_OFF(APICCPU, pvApicPageR0);
+ GEN_CHECK_OFF(APICCPU, pvApicPageR3);
+ GEN_CHECK_OFF(APICCPU, cbApicPage);
+ GEN_CHECK_OFF(APICCPU, pvApicPibR0);
+ GEN_CHECK_OFF(APICCPU, pvApicPibR3);
+ GEN_CHECK_OFF(APICCPU, ApicPibLevel);
+ GEN_CHECK_OFF(APICCPU, hTimer);
+
+ GEN_CHECK_SIZE(VM);
+ GEN_CHECK_OFF(VM, enmVMState);
+ GEN_CHECK_OFF(VM, fGlobalForcedActions);
+ GEN_CHECK_OFF(VM, paVMPagesR3);
+ GEN_CHECK_OFF(VM, pSession);
+ GEN_CHECK_OFF(VM, pUVM);
+ GEN_CHECK_OFF(VM, pVMR3);
+ GEN_CHECK_OFF(VM, pVMR0ForCall);
+ GEN_CHECK_OFF(VM, pVMRC);
+#ifdef IN_RING0
+ GEN_CHECK_OFF(VM, hSelfUnsafe);
+ GEN_CHECK_OFF(VM, cCpusUnsafe);
+#else
+ GEN_CHECK_OFF(VM, hSelf);
+ GEN_CHECK_OFF(VM, cCpus);
+#endif
+ GEN_CHECK_OFF(VM, uCpuExecutionCap);
+ GEN_CHECK_OFF(VM, cbSelf);
+ GEN_CHECK_OFF(VM, bMainExecutionEngine);
+ GEN_CHECK_OFF(VM, fHMEnabled);
+ GEN_CHECK_OFF(VM, fUseLargePages);
+ GEN_CHECK_OFF(VM, hTraceBufR3);
+ GEN_CHECK_OFF(VM, hTraceBufR0);
+ GEN_CHECK_OFF(VM, cpum);
+ GEN_CHECK_OFF(VM, vmm);
+ GEN_CHECK_OFF(VM, pgm);
+ GEN_CHECK_OFF(VM, hm);
+ GEN_CHECK_OFF(VM, trpm);
+ GEN_CHECK_OFF(VM, selm);
+ GEN_CHECK_OFF(VM, mm);
+ GEN_CHECK_OFF(VM, pdm);
+ GEN_CHECK_OFF(VM, iom);
+#ifdef VBOX_WITH_RAW_MODE
+ GEN_CHECK_OFF(VM, patm);
+ GEN_CHECK_OFF(VM, csam);
+#endif
+ GEN_CHECK_OFF(VM, em);
+ GEN_CHECK_OFF(VM, tm);
+ GEN_CHECK_OFF(VM, dbgf);
+ GEN_CHECK_OFF(VM, ssm);
+ GEN_CHECK_OFF(VM, gim);
+ GEN_CHECK_OFF(VM, vm);
+ GEN_CHECK_OFF(VM, cfgm);
+ GEN_CHECK_OFF(VM, apic);
+
+
+ GEN_CHECK_SIZE(VMCPU);
+ GEN_CHECK_OFF(VMCPU, fLocalForcedActions);
+ GEN_CHECK_OFF(VMCPU, enmState);
+ GEN_CHECK_OFF(VMCPU, pUVCpu);
+ GEN_CHECK_OFF(VMCPU, pVMR3);
+ GEN_CHECK_OFF(VMCPU, pVCpuR0ForVtg);
+ GEN_CHECK_OFF(VMCPU, pVMRC);
+ GEN_CHECK_OFF(VMCPU, idCpu);
+ GEN_CHECK_OFF(VMCPU, hNativeThread);
+ GEN_CHECK_OFF(VMCPU, hNativeThreadR0);
+ GEN_CHECK_OFF(VMCPU, idHostCpu);
+ GEN_CHECK_OFF(VMCPU, fTraceGroups);
+ GEN_CHECK_OFF(VMCPU, uAdHoc);
+ GEN_CHECK_OFF(VMCPU, aStatAdHoc);
+ GEN_CHECK_OFF(VMCPU, hm);
+ GEN_CHECK_OFF(VMCPU, em);
+ GEN_CHECK_OFF(VMCPU, iem);
+ GEN_CHECK_OFF(VMCPU, trpm);
+ GEN_CHECK_OFF(VMCPU, tm);
+ GEN_CHECK_OFF(VMCPU, vmm);
+ GEN_CHECK_OFF(VMCPU, pdm);
+ GEN_CHECK_OFF(VMCPU, iom);
+ GEN_CHECK_OFF(VMCPU, dbgf);
+ GEN_CHECK_OFF(VMCPU, gim);
+ GEN_CHECK_OFF(VMCPU, apic);
+ GEN_CHECK_OFF(VMCPU, pgm);
+ GEN_CHECK_OFF(VMCPU, cpum);
+
+#ifndef VBOX_FOR_DTRACE_LIB
+ GEN_CHECK_SIZE(DISCPUSTATE);
+ GEN_CHECK_OFF(DISCPUSTATE, Param1);
+ GEN_CHECK_OFF(DISCPUSTATE, Param2);
+ GEN_CHECK_OFF(DISCPUSTATE, Param3);
+ GEN_CHECK_OFF(DISCPUSTATE, i32SibDisp);
+ GEN_CHECK_OFF(DISCPUSTATE, fFilter);
+ GEN_CHECK_OFF(DISCPUSTATE, uInstrAddr);
+ GEN_CHECK_OFF(DISCPUSTATE, abInstr);
+ GEN_CHECK_OFF(DISCPUSTATE, pvUser);
+#endif
diff --git a/src/VBox/VMM/testcase/tstVMStructDTrace.cpp b/src/VBox/VMM/testcase/tstVMStructDTrace.cpp
new file mode 100644
index 00000000..496e8537
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstVMStructDTrace.cpp
@@ -0,0 +1,144 @@
+/* $Id: tstVMStructDTrace.cpp $ */
+/** @file
+ * tstVMMStructDTrace - Generates the DTrace test scripts to check that C/C++
+ *                      and DTrace have the same understanding of the VM,
+ *                      VMCPU and other structures.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define IN_TSTVMSTRUCT 1
+#define IN_TSTVMSTRUCTGC 1
+#include <VBox/vmm/cfgm.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/trpm.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/stam.h>
+#include "PDMInternal.h"
+#include <VBox/vmm/pdm.h>
+#include "CFGMInternal.h"
+#include "CPUMInternal.h"
+#include "MMInternal.h"
+#include "PGMInternal.h"
+#include "SELMInternal.h"
+#include "TRPMInternal.h"
+#include "TMInternal.h"
+#include "IOMInternal.h"
+#include "HMInternal.h"
+#include "APICInternal.h"
+#include "VMMInternal.h"
+#include "DBGFInternal.h"
+#include "GIMInternal.h"
+#include "STAMInternal.h"
+#include "EMInternal.h"
+#include "IEMInternal.h"
+#include "NEMInternal.h"
+#ifdef VBOX_WITH_RAW_MODE
+# include "CSAMInternal.h"
+# include "PATMInternal.h"
+#endif
+#include <VBox/vmm/vm.h>
+#include <VBox/param.h>
+#include <iprt/x86.h>
+#include <iprt/assert.h>
+
+/* We use plain stdio rather than iprt so this generator runs with minimal runtime dependencies. */
+#include <stdio.h>
+
+
+int main() /* Writes a DTrace script to stdout; that script re-checks every size/offset emitted below. */
+{
+ /*
+ * File header and pragmas.
+ */
+ printf("#pragma D option quiet\n"); /* suppress dtrace's default per-probe output in the generated script */
+// printf("#pragma D depends_on library x86.d\n");
+// printf("#pragma D depends_on library cpumctx.d\n");
+// printf("#pragma D depends_on library CPUMInternal.d\n");
+// printf("#pragma D depends_on library vm.d\n");
+
+ printf("int g_cErrors;\n"
+ "\n"
+ "dtrace:::BEGIN\n"
+ "{\n"
+ " g_cErrors = 0;\n"
+ "}\n"
+ "\n"
+ );
+
+ /*
+ * Test generator macros.
+ */
+#define GEN_CHECK_SIZE(s) \
+ printf("dtrace:::BEGIN\n" \
+ "/sizeof(" #s ") != %u/\n" \
+ "{\n" \
+ " printf(\"error: sizeof(" #s ") should be %u, not %%u\\n\", sizeof(" #s "));\n" \
+ " g_cErrors++;\n" \
+ "}\n" \
+ "\n", \
+ (unsigned)sizeof(s), (unsigned)sizeof(s))
+
+#if 1 /* flip to 0 to stub out the member-offset checks defined below */
+# define GEN_CHECK_OFF(s, m) \
+ printf("dtrace:::BEGIN\n" \
+ "/offsetof(" #s ", " #m ") != %u/\n" \
+ "{\n" \
+ " printf(\"error: offsetof(" #s ", " #m ") should be %u, not %%u\\n\", offsetof(" #s ", " #m "));\n" \
+ " g_cErrors++;\n" \
+ "}\n" \
+ "\n", \
+ (unsigned)RT_OFFSETOF(s, m), (unsigned)RT_OFFSETOF(s, m))
+
+#else
+# define GEN_CHECK_OFF(s, m) do { } while (0) /* offset checks stubbed out */
+#endif
+
+#define GEN_CHECK_OFF_DOT(s, m) do { } while (0) /* dotted sub-member offsets are not emitted by this generator */
+
+
+ /*
+ * Body.
+ */
+#define VBOX_FOR_DTRACE_LIB /* skips the !VBOX_FOR_DTRACE_LIB-guarded parts of tstVMStruct.h (e.g. DISCPUSTATE) */
+#include "tstVMStruct.h"
+
+ /*
+ * Footer.
+ */
+ printf("dtrace:::BEGIN\n"
+ "/g_cErrors != 0/\n"
+ "{\n"
+ " printf(\"%%u errors!\\n\", g_cErrors);\n"
+ " exit(1);\n"
+ "}\n"
+ "\n"
+ "dtrace:::BEGIN\n"
+ "{\n"
+ " printf(\"Success!\\n\");\n"
+ " exit(0);\n"
+ "}\n"
+ "\n"
+ );
+
+
+ return (0); /* always 0: the emitted script, not this generator, reports mismatches */
+}
+
diff --git a/src/VBox/VMM/testcase/tstVMStructRC.cpp b/src/VBox/VMM/testcase/tstVMStructRC.cpp
new file mode 100644
index 00000000..8c065fff
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstVMStructRC.cpp
@@ -0,0 +1,99 @@
+/* $Id: tstVMStructRC.cpp $ */
+/** @file
+ * tstVMMStructRC - Generate structure member and size checks from the
+ * RC perspective.
+ *
+ * This is built using the VBOXRC template but linked into a host
+ * ring-3 executable, rather hacky.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*
+ * Sanity checks.
+ */
+#ifndef IN_RC
+# error Incorrect template!
+#endif
+#if defined(IN_RING3) || defined(IN_RING0)
+# error Incorrect template!
+#endif
+
+#include <VBox/types.h>
+#include <iprt/assert.h>
+AssertCompileSize(uint8_t, 1);
+AssertCompileSize(uint16_t, 2);
+AssertCompileSize(uint32_t, 4);
+AssertCompileSize(uint64_t, 8);
+AssertCompileSize(RTRCPTR, 4);
+#ifdef VBOX_WITH_64_BITS_GUESTS
+AssertCompileSize(RTGCPTR, 8);
+#else
+AssertCompileSize(RTGCPTR, 4);
+#endif
+AssertCompileSize(RTGCPHYS, 8);
+AssertCompileSize(RTHCPHYS, 8);
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define IN_TSTVMSTRUCT 1
+#define IN_TSTVMSTRUCTGC 1
+#include <VBox/vmm/cfgm.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/stam.h>
+#include "PDMInternal.h"
+#include <VBox/vmm/pdm.h>
+#include "CFGMInternal.h"
+#include "CPUMInternal.h"
+#include "MMInternal.h"
+#include "PGMInternal.h"
+#include "SELMInternal.h"
+#include "TMInternal.h"
+#include "IOMInternal.h"
+#include "HMInternal.h"
+#include "APICInternal.h"
+#include "PATMInternal.h"
+#include "VMMInternal.h"
+#include "DBGFInternal.h"
+#include "GIMInternal.h"
+#include "STAMInternal.h"
+#include "CSAMInternal.h"
+#include "EMInternal.h"
+#include "IEMInternal.h"
+#include "NEMInternal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/hm_vmx.h>
+#include <VBox/param.h>
+#include <iprt/x86.h>
+#include <iprt/assert.h>
+
+/* we don't use iprt here because we're pretending to be in GC! */
+#include <stdio.h>
+
+
+int main() /* Prints a CHECK_SIZE/CHECK_OFF line, as seen from the RC compile, for each entry in tstVMStruct.h. */
+{
+#define GEN_CHECK_SIZE(s) printf(" CHECK_SIZE(%s, %u);\n", #s, (unsigned)sizeof(s)) /* size from the RC perspective */
+#define GEN_CHECK_OFF(s, m) printf(" CHECK_OFF(%s, %u, %s);\n", #s, (unsigned)RT_OFFSETOF(s, m), #m) /* member offset via RT_OFFSETOF */
+#define GEN_CHECK_OFF_DOT(s, m) printf(" CHECK_OFF(%s, %u, %s);\n", #s, (unsigned)RT_OFFSETOF(s, m), #m) /* dotted members use the same CHECK_OFF form here */
+#include "tstVMStruct.h"
+ return (0); /* generation itself cannot fail */
+}
+
diff --git a/src/VBox/VMM/testcase/tstVMStructSize.cpp b/src/VBox/VMM/testcase/tstVMStructSize.cpp
new file mode 100644
index 00000000..5c140f33
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstVMStructSize.cpp
@@ -0,0 +1,464 @@
+/* $Id: tstVMStructSize.cpp $ */
+/** @file
+ * tstVMStructSize - testcase for check structure sizes/alignment
+ * and to verify that HC and GC uses the same
+ * representation of the structures.
+ */
+
+/*
+ * Copyright (C) 2006-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#define IN_TSTVMSTRUCT 1
+#include <VBox/vmm/cfgm.h>
+#include <VBox/vmm/cpum.h>
+#include <VBox/vmm/mm.h>
+#include <VBox/vmm/pgm.h>
+#include <VBox/vmm/selm.h>
+#include <VBox/vmm/trpm.h>
+#include <VBox/vmm/vmm.h>
+#include <VBox/vmm/stam.h>
+#include "PDMInternal.h"
+#include <VBox/vmm/pdm.h>
+#include "CFGMInternal.h"
+#include "CPUMInternal.h"
+#include "MMInternal.h"
+#include "PGMInternal.h"
+#include "SELMInternal.h"
+#include "TRPMInternal.h"
+#include "TMInternal.h"
+#include "IOMInternal.h"
+#include "SSMInternal.h"
+#include "HMInternal.h"
+#include "VMMInternal.h"
+#include "DBGFInternal.h"
+#include "GIMInternal.h"
+#include "APICInternal.h"
+#include "STAMInternal.h"
+#include "VMInternal.h"
+#include "EMInternal.h"
+#include "IEMInternal.h"
+#include "NEMInternal.h"
+#include "../VMMR0/GMMR0Internal.h"
+#include "../VMMR0/GVMMR0Internal.h"
+#include <VBox/vmm/vm.h>
+#include <VBox/vmm/uvm.h>
+#include <VBox/vmm/gvm.h>
+#include <VBox/param.h>
+#include <VBox/dis.h>
+#include <iprt/x86.h>
+
+#include "tstHelp.h"
+#include <stdio.h>
+
+
+
+int main()
+{
+ int rc = 0; /* error counter; printed at the end and returned as the exit code */
+ printf("tstVMStructSize: TESTING\n");
+
+ printf("info: struct VM: %d bytes\n", (int)sizeof(VM));
+
+#define CHECK_PADDING_VM(align, member) \
+ do \
+ { \
+ CHECK_PADDING(VM, member, align); \
+ CHECK_MEMBER_ALIGNMENT(VM, member, align); \
+ VM *p = NULL; NOREF(p); \
+ if (sizeof(p->member.padding) >= (ssize_t)sizeof(p->member.s) + 128 + sizeof(p->member.s) / 20) \
+ printf("warning: VM::%-8s: padding=%-5d s=%-5d -> %-4d suggest=%-5u\n", \
+ #member, (int)sizeof(p->member.padding), (int)sizeof(p->member.s), \
+ (int)sizeof(p->member.padding) - (int)sizeof(p->member.s), \
+ (int)RT_ALIGN_Z(sizeof(p->member.s), (align))); \
+ } while (0)
+
+
+#define CHECK_PADDING_VMCPU(align, member) \
+ do \
+ { \
+ CHECK_PADDING(VMCPU, member, align); \
+ CHECK_MEMBER_ALIGNMENT(VMCPU, member, align); \
+ VMCPU *p = NULL; NOREF(p); \
+ if (sizeof(p->member.padding) >= (ssize_t)sizeof(p->member.s) + 128 + sizeof(p->member.s) / 20) \
+ printf("warning: VMCPU::%-8s: padding=%-5d s=%-5d -> %-4d suggest=%-5u\n", \
+ #member, (int)sizeof(p->member.padding), (int)sizeof(p->member.s), \
+ (int)sizeof(p->member.padding) - (int)sizeof(p->member.s), \
+ (int)RT_ALIGN_Z(sizeof(p->member.s), (align))); \
+ } while (0)
+
+#define CHECK_CPUMCTXCORE(member) \
+ do { \
+ unsigned off1 = RT_OFFSETOF(CPUMCTX, member) - RT_OFFSETOF(CPUMCTX, rax); \
+ unsigned off2 = RT_OFFSETOF(CPUMCTXCORE, member); \
+ if (off1 != off2) \
+ { \
+ printf("error! CPUMCTX/CORE:: %s! (%#x vs %#x (ctx))\n", #member, off1, off2); \
+ rc++; \
+ } \
+ } while (0)
+
+#define CHECK_PADDING_UVM(align, member) \
+ do \
+ { \
+ CHECK_PADDING(UVM, member, align); \
+ CHECK_MEMBER_ALIGNMENT(UVM, member, align); \
+ UVM *p = NULL; NOREF(p); \
+ if (sizeof(p->member.padding) >= (ssize_t)sizeof(p->member.s) + 128 + sizeof(p->member.s) / 20) \
+ printf("warning: UVM::%-8s: padding=%-5d s=%-5d -> %-4d suggest=%-5u\n", \
+ #member, (int)sizeof(p->member.padding), (int)sizeof(p->member.s), \
+ (int)sizeof(p->member.padding) - (int)sizeof(p->member.s), \
+ (int)RT_ALIGN_Z(sizeof(p->member.s), (align))); \
+ } while (0)
+
+#define CHECK_PADDING_UVMCPU(align, member) \
+ do \
+ { \
+ CHECK_PADDING(UVMCPU, member, align); \
+ CHECK_MEMBER_ALIGNMENT(UVMCPU, member, align); \
+ UVMCPU *p = NULL; NOREF(p); \
+ if (sizeof(p->member.padding) >= (ssize_t)sizeof(p->member.s) + 128 + sizeof(p->member.s) / 20) \
+ printf("warning: UVMCPU::%-8s: padding=%-5d s=%-5d -> %-4d suggest=%-5u\n", \
+ #member, (int)sizeof(p->member.padding), (int)sizeof(p->member.s), \
+ (int)sizeof(p->member.padding) - (int)sizeof(p->member.s), \
+ (int)RT_ALIGN_Z(sizeof(p->member.s), (align))); \
+ } while (0)
+
+#define CHECK_PADDING_GVM(align, member) \
+ do \
+ { \
+ CHECK_PADDING(GVM, member, align); \
+ CHECK_MEMBER_ALIGNMENT(GVM, member, align); \
+ GVM *p = NULL; NOREF(p); \
+ if (sizeof(p->member.padding) >= (ssize_t)sizeof(p->member.s) + 128 + sizeof(p->member.s) / 20) \
+ printf("warning: GVM::%-8s: padding=%-5d s=%-5d -> %-4d suggest=%-5u\n", \
+ #member, (int)sizeof(p->member.padding), (int)sizeof(p->member.s), \
+ (int)sizeof(p->member.padding) - (int)sizeof(p->member.s), \
+ (int)RT_ALIGN_Z(sizeof(p->member.s), (align))); \
+ } while (0)
+
+#define CHECK_PADDING_GVMCPU(align, member) \
+ do \
+ { \
+ CHECK_PADDING(GVMCPU, member, align); \
+ CHECK_MEMBER_ALIGNMENT(GVMCPU, member, align); \
+ GVMCPU *p = NULL; NOREF(p); \
+ if (sizeof(p->member.padding) >= (ssize_t)sizeof(p->member.s) + 128 + sizeof(p->member.s) / 20) \
+ printf("warning: GVMCPU::%-8s: padding=%-5d s=%-5d -> %-4d suggest=%-5u\n", \
+ #member, (int)sizeof(p->member.padding), (int)sizeof(p->member.s), \
+ (int)sizeof(p->member.padding) - (int)sizeof(p->member.s), \
+ (int)RT_ALIGN_Z(sizeof(p->member.s), (align))); \
+ } while (0)
+
+#define PRINT_OFFSET(strct, member) \
+ do \
+ { \
+ printf("info: %10s::%-24s offset %#6x (%6d) sizeof %4d\n", #strct, #member, (int)RT_OFFSETOF(strct, member), (int)RT_OFFSETOF(strct, member), (int)RT_SIZEOFMEMB(strct, member)); \
+ } while (0)
+
+
+ CHECK_SIZE(uint128_t, 128/8); /* basic integer type size sanity checks */
+ CHECK_SIZE(int128_t, 128/8);
+ CHECK_SIZE(uint64_t, 64/8);
+ CHECK_SIZE(int64_t, 64/8);
+ CHECK_SIZE(uint32_t, 32/8);
+ CHECK_SIZE(int32_t, 32/8);
+ CHECK_SIZE(uint16_t, 16/8);
+ CHECK_SIZE(int16_t, 16/8);
+ CHECK_SIZE(uint8_t, 8/8);
+ CHECK_SIZE(int8_t, 8/8);
+
+ CHECK_SIZE(X86DESC, 8); /* x86 descriptor-table/paging structures must match the hardware layout */
+ CHECK_SIZE(X86DESC64, 16);
+ CHECK_SIZE(VBOXIDTE, 8);
+ CHECK_SIZE(VBOXIDTR, 10);
+ CHECK_SIZE(VBOXGDTR, 10);
+ CHECK_SIZE(VBOXTSS, 136);
+ CHECK_SIZE(X86FXSTATE, 512);
+ CHECK_SIZE(RTUUID, 16);
+ CHECK_SIZE(X86PTE, 4);
+ CHECK_SIZE(X86PD, PAGE_SIZE);
+ CHECK_SIZE(X86PDE, 4);
+ CHECK_SIZE(X86PT, PAGE_SIZE);
+ CHECK_SIZE(X86PTEPAE, 8);
+ CHECK_SIZE(X86PTPAE, PAGE_SIZE);
+ CHECK_SIZE(X86PDEPAE, 8);
+ CHECK_SIZE(X86PDPAE, PAGE_SIZE);
+ CHECK_SIZE(X86PDPE, 8);
+ CHECK_SIZE(X86PDPT, PAGE_SIZE);
+ CHECK_SIZE(X86PML4E, 8);
+ CHECK_SIZE(X86PML4, PAGE_SIZE);
+
+ PRINT_OFFSET(VM, cpum); /* per-subsystem padding/alignment checks within struct VM */
+ CHECK_PADDING_VM(64, cpum);
+ CHECK_PADDING_VM(64, vmm);
+ PRINT_OFFSET(VM, pgm);
+ PRINT_OFFSET(VM, pgm.s.CritSectX);
+ CHECK_PADDING_VM(64, pgm);
+ PRINT_OFFSET(VM, hm);
+ CHECK_PADDING_VM(64, hm);
+ CHECK_PADDING_VM(64, trpm);
+ CHECK_PADDING_VM(64, selm);
+ CHECK_PADDING_VM(64, mm);
+ CHECK_PADDING_VM(64, pdm);
+ PRINT_OFFSET(VM, pdm.s.CritSect);
+ CHECK_PADDING_VM(64, iom);
+ CHECK_PADDING_VM(64, em);
+ /*CHECK_PADDING_VM(64, iem);*/
+ CHECK_PADDING_VM(64, nem);
+ CHECK_PADDING_VM(64, tm);
+ PRINT_OFFSET(VM, tm.s.VirtualSyncLock);
+ CHECK_PADDING_VM(64, dbgf);
+ CHECK_PADDING_VM(64, gim);
+ CHECK_PADDING_VM(64, ssm);
+ CHECK_PADDING_VM(8, vm);
+ CHECK_PADDING_VM(8, cfgm);
+ CHECK_PADDING_VM(8, apic);
+ PRINT_OFFSET(VM, cfgm);
+ PRINT_OFFSET(VM, apCpusR3);
+
+ PRINT_OFFSET(VMCPU, cpum); /* same checks for the per-CPU structure */
+ CHECK_PADDING_VMCPU(64, iem);
+ CHECK_PADDING_VMCPU(64, hm);
+ CHECK_PADDING_VMCPU(64, em);
+ CHECK_PADDING_VMCPU(64, nem);
+ CHECK_PADDING_VMCPU(64, trpm);
+ CHECK_PADDING_VMCPU(64, tm);
+ CHECK_PADDING_VMCPU(64, vmm);
+ CHECK_PADDING_VMCPU(64, pdm);
+ CHECK_PADDING_VMCPU(64, iom);
+ CHECK_PADDING_VMCPU(64, dbgf);
+ CHECK_PADDING_VMCPU(64, gim);
+ CHECK_PADDING_VMCPU(64, apic);
+
+ PRINT_OFFSET(VMCPU, pgm);
+ CHECK_PADDING_VMCPU(4096, pgm);
+ CHECK_PADDING_VMCPU(4096, cpum);
+
+ PVM pVM = NULL; NOREF(pVM);
+
+ CHECK_MEMBER_ALIGNMENT(VMCPU, vmm.s.u64CallRing3Arg, 8);
+#if defined(RT_OS_WINDOWS) && defined(RT_ARCH_AMD64)
+ CHECK_MEMBER_ALIGNMENT(VMCPU, vmm.s.CallRing3JmpBufR0, 16);
+ CHECK_MEMBER_ALIGNMENT(VMCPU, vmm.s.CallRing3JmpBufR0.xmm6, 16);
+#endif
+ CHECK_MEMBER_ALIGNMENT(VM, vmm.s.u64LastYield, 8);
+
+ /* the VMCPUs are page aligned TLB hit reasons. */
+ CHECK_SIZE_ALIGNMENT(VMCPU, 4096);
+
+ /* cpumctx */
+ CHECK_MEMBER_ALIGNMENT(CPUMCTX, rax, 32);
+ CHECK_MEMBER_ALIGNMENT(CPUMCTX, idtr.pIdt, 8);
+ CHECK_MEMBER_ALIGNMENT(CPUMCTX, gdtr.pGdt, 8);
+ CHECK_MEMBER_ALIGNMENT(CPUMCTX, SysEnter, 8);
+ CHECK_MEMBER_ALIGNMENT(CPUMCTX, hwvirt, 8);
+ CHECK_CPUMCTXCORE(rax); /* CPUMCTXCORE members must mirror CPUMCTX starting at rax */
+ CHECK_CPUMCTXCORE(rcx);
+ CHECK_CPUMCTXCORE(rdx);
+ CHECK_CPUMCTXCORE(rbx);
+ CHECK_CPUMCTXCORE(rsp);
+ CHECK_CPUMCTXCORE(rbp);
+ CHECK_CPUMCTXCORE(rsi);
+ CHECK_CPUMCTXCORE(rdi);
+ CHECK_CPUMCTXCORE(r8);
+ CHECK_CPUMCTXCORE(r9);
+ CHECK_CPUMCTXCORE(r10);
+ CHECK_CPUMCTXCORE(r11);
+ CHECK_CPUMCTXCORE(r12);
+ CHECK_CPUMCTXCORE(r13);
+ CHECK_CPUMCTXCORE(r14);
+ CHECK_CPUMCTXCORE(r15);
+ CHECK_CPUMCTXCORE(es);
+ CHECK_CPUMCTXCORE(ss);
+ CHECK_CPUMCTXCORE(cs);
+ CHECK_CPUMCTXCORE(ds);
+ CHECK_CPUMCTXCORE(fs);
+ CHECK_CPUMCTXCORE(gs);
+ CHECK_CPUMCTXCORE(rip);
+ CHECK_CPUMCTXCORE(rflags);
+
+#if HC_ARCH_BITS == 32
+ /* CPUMHOSTCTX - lss pair */
+ if (RT_UOFFSETOF(CPUMHOSTCTX, esp) + 4 != RT_UOFFSETOF(CPUMHOSTCTX, ss))
+ {
+ printf("error! CPUMHOSTCTX lss has been split up!\n");
+ rc++;
+ }
+#endif
+ CHECK_SIZE_ALIGNMENT(CPUMCTX, 64);
+ CHECK_SIZE_ALIGNMENT(CPUMHOSTCTX, 64);
+ CHECK_SIZE_ALIGNMENT(CPUMCTXMSRS, 64);
+
+ /* pdm */
+ PRINT_OFFSET(PDMDEVINSR3, Internal);
+ PRINT_OFFSET(PDMDEVINSR3, achInstanceData);
+ CHECK_MEMBER_ALIGNMENT(PDMDEVINSR3, achInstanceData, 64);
+ CHECK_PADDING(PDMDEVINSR3, Internal, 1);
+
+ PRINT_OFFSET(PDMDEVINSR0, Internal);
+ PRINT_OFFSET(PDMDEVINSR0, achInstanceData);
+ CHECK_MEMBER_ALIGNMENT(PDMDEVINSR0, achInstanceData, 64);
+ CHECK_PADDING(PDMDEVINSR0, Internal, 1);
+
+ PRINT_OFFSET(PDMDEVINSRC, Internal);
+ PRINT_OFFSET(PDMDEVINSRC, achInstanceData);
+ CHECK_MEMBER_ALIGNMENT(PDMDEVINSRC, achInstanceData, 64);
+ CHECK_PADDING(PDMDEVINSRC, Internal, 1);
+
+ PRINT_OFFSET(PDMUSBINS, Internal);
+ PRINT_OFFSET(PDMUSBINS, achInstanceData);
+ CHECK_MEMBER_ALIGNMENT(PDMUSBINS, achInstanceData, 32);
+ CHECK_PADDING(PDMUSBINS, Internal, 1);
+
+ PRINT_OFFSET(PDMDRVINS, Internal);
+ PRINT_OFFSET(PDMDRVINS, achInstanceData);
+ CHECK_MEMBER_ALIGNMENT(PDMDRVINS, achInstanceData, 32);
+ CHECK_PADDING(PDMDRVINS, Internal, 1);
+
+ CHECK_PADDING2(PDMCRITSECT);
+ CHECK_PADDING2(PDMCRITSECTRW);
+
+ /* pgm */
+#if defined(VBOX_WITH_2X_4GB_ADDR_SPACE)
+ CHECK_MEMBER_ALIGNMENT(PGMCPU, AutoSet, 8);
+#endif
+ CHECK_MEMBER_ALIGNMENT(PGMCPU, GCPhysCR3, sizeof(RTGCPHYS));
+ CHECK_MEMBER_ALIGNMENT(PGMCPU, aGCPhysGstPaePDs, sizeof(RTGCPHYS));
+ CHECK_MEMBER_ALIGNMENT(PGMCPU, DisState, 8);
+ CHECK_MEMBER_ALIGNMENT(PGMCPU, cPoolAccessHandler, 8);
+ CHECK_MEMBER_ALIGNMENT(PGMPOOLPAGE, idx, sizeof(uint16_t));
+ CHECK_MEMBER_ALIGNMENT(PGMPOOLPAGE, pvPageR3, sizeof(RTHCPTR));
+ CHECK_MEMBER_ALIGNMENT(PGMPOOLPAGE, GCPhys, sizeof(RTGCPHYS));
+ CHECK_SIZE(PGMPAGE, 16);
+ CHECK_MEMBER_ALIGNMENT(PGMRAMRANGE, aPages, 16);
+ CHECK_MEMBER_ALIGNMENT(PGMREGMMIO2RANGE, RamRange, 16);
+
+ /* TM */
+ CHECK_MEMBER_ALIGNMENT(TM, TimerCritSect, sizeof(uintptr_t));
+ CHECK_MEMBER_ALIGNMENT(TM, VirtualSyncLock, sizeof(uintptr_t));
+
+ /* misc */
+ CHECK_PADDING3(EMCPU, u.FatalLongJump, u.achPaddingFatalLongJump);
+ CHECK_SIZE_ALIGNMENT(VMMR0JMPBUF, 8);
+#if 0
+ PRINT_OFFSET(VM, fForcedActions);
+ PRINT_OFFSET(VM, StatQemuToGC);
+ PRINT_OFFSET(VM, StatGCToQemu);
+#endif
+
+ CHECK_MEMBER_ALIGNMENT(IOM, CritSect, sizeof(uintptr_t));
+ CHECK_MEMBER_ALIGNMENT(EMCPU, u.achPaddingFatalLongJump, 32);
+ CHECK_MEMBER_ALIGNMENT(EMCPU, aExitRecords, sizeof(EMEXITREC));
+ CHECK_MEMBER_ALIGNMENT(PGM, CritSectX, sizeof(uintptr_t));
+ CHECK_MEMBER_ALIGNMENT(PDM, CritSect, sizeof(uintptr_t));
+ CHECK_MEMBER_ALIGNMENT(MMHYPERHEAP, Lock, sizeof(uintptr_t));
+
+ /* hm - 32-bit gcc won't align uint64_t naturally, so check. */
+ CHECK_MEMBER_ALIGNMENT(HM, uMaxAsid, 8);
+ CHECK_MEMBER_ALIGNMENT(HM, vmx, 8);
+ CHECK_MEMBER_ALIGNMENT(HM, vmx.Msrs, 8);
+ CHECK_MEMBER_ALIGNMENT(HM, svm, 8);
+ CHECK_MEMBER_ALIGNMENT(HM, PatchTree, 8);
+ CHECK_MEMBER_ALIGNMENT(HM, aPatches, 8);
+ CHECK_MEMBER_ALIGNMENT(HMCPU, vmx, 8);
+ CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.VmcsInfo, 8);
+ CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.VmcsInfo.pfnStartVM, 8);
+ CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.VmcsInfoNstGst, 8);
+ CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.VmcsInfoNstGst.pfnStartVM, 8);
+ CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.RestoreHost, 8);
+ CHECK_MEMBER_ALIGNMENT(HMCPU, vmx.LastError, 8);
+ CHECK_MEMBER_ALIGNMENT(HMCPU, svm, 8);
+ CHECK_MEMBER_ALIGNMENT(HMCPU, svm.pfnVMRun, 8);
+ CHECK_MEMBER_ALIGNMENT(HMCPU, Event, 8);
+ CHECK_MEMBER_ALIGNMENT(HMCPU, Event.u64IntInfo, 8);
+ CHECK_MEMBER_ALIGNMENT(HMCPU, DisState, 8);
+ CHECK_MEMBER_ALIGNMENT(HMCPU, StatEntry, 8);
+
+ /* Make sure the set is large enough and has the correct size. */
+ CHECK_SIZE(VMCPUSET, 32);
+ if (sizeof(VMCPUSET) * 8 < VMM_MAX_CPU_COUNT)
+ {
+ printf("error! VMCPUSET is too small for VMM_MAX_CPU_COUNT=%u!\n", VMM_MAX_CPU_COUNT);
+ rc++;
+ }
+
+ printf("info: struct UVM: %d bytes\n", (int)sizeof(UVM));
+
+ CHECK_PADDING_UVM(32, vm);
+ CHECK_PADDING_UVM(32, mm);
+ CHECK_PADDING_UVM(32, pdm);
+ CHECK_PADDING_UVM(32, stam);
+
+ printf("info: struct UVMCPU: %d bytes\n", (int)sizeof(UVMCPU));
+ CHECK_PADDING_UVMCPU(32, vm);
+
+ CHECK_PADDING_GVM(4, gvmm);
+ CHECK_PADDING_GVM(4, gmm);
+ CHECK_PADDING_GVMCPU(4, gvmm);
+
+ /*
+ * Check that the optimized access macros for PGMPAGE works correctly (kind of
+ * obsolete after dropping raw-mode).
+ */
+ PGMPAGE Page; /* scratch page for exercising the PGM_PAGE_* accessors below */
+ PGM_PAGE_CLEAR(&Page);
+
+ CHECK_EXPR(PGM_PAGE_GET_HNDL_PHYS_STATE(&Page) == PGM_PAGE_HNDL_PHYS_STATE_NONE);
+ CHECK_EXPR(PGM_PAGE_HAS_ANY_HANDLERS(&Page) == false);
+ CHECK_EXPR(PGM_PAGE_HAS_ACTIVE_HANDLERS(&Page) == false);
+ CHECK_EXPR(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(&Page) == false);
+
+ PGM_PAGE_SET_HNDL_PHYS_STATE(&Page, PGM_PAGE_HNDL_PHYS_STATE_ALL);
+ CHECK_EXPR(PGM_PAGE_GET_HNDL_PHYS_STATE(&Page) == PGM_PAGE_HNDL_PHYS_STATE_ALL);
+ CHECK_EXPR(PGM_PAGE_HAS_ANY_HANDLERS(&Page) == true);
+ CHECK_EXPR(PGM_PAGE_HAS_ACTIVE_HANDLERS(&Page) == true);
+ CHECK_EXPR(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(&Page) == true);
+
+ PGM_PAGE_SET_HNDL_PHYS_STATE(&Page, PGM_PAGE_HNDL_PHYS_STATE_WRITE);
+ CHECK_EXPR(PGM_PAGE_GET_HNDL_PHYS_STATE(&Page) == PGM_PAGE_HNDL_PHYS_STATE_WRITE);
+ CHECK_EXPR(PGM_PAGE_HAS_ANY_HANDLERS(&Page) == true);
+ CHECK_EXPR(PGM_PAGE_HAS_ACTIVE_HANDLERS(&Page) == true);
+ CHECK_EXPR(PGM_PAGE_HAS_ACTIVE_ALL_HANDLERS(&Page) == false);
+
+#undef AssertFatal
+#define AssertFatal(expr) do { } while (0) /* neutralized: the PGM_PAGE_SET_* calls below pass a NULL pVM */
+#undef Assert
+#define Assert(expr) do { } while (0)
+
+ PGM_PAGE_CLEAR(&Page);
+ CHECK_EXPR(PGM_PAGE_GET_HCPHYS_NA(&Page) == 0);
+ PGM_PAGE_SET_HCPHYS(NULL, &Page, UINT64_C(0x0000fffeff1ff000));
+ CHECK_EXPR(PGM_PAGE_GET_HCPHYS_NA(&Page) == UINT64_C(0x0000fffeff1ff000));
+ PGM_PAGE_SET_HCPHYS(NULL, &Page, UINT64_C(0x0000000000001000));
+ CHECK_EXPR(PGM_PAGE_GET_HCPHYS_NA(&Page) == UINT64_C(0x0000000000001000));
+
+ PGM_PAGE_INIT(&Page, UINT64_C(0x0000feedfacef000), UINT32_C(0x12345678), PGMPAGETYPE_RAM, PGM_PAGE_STATE_ALLOCATED);
+ CHECK_EXPR(PGM_PAGE_GET_HCPHYS_NA(&Page) == UINT64_C(0x0000feedfacef000));
+ CHECK_EXPR(PGM_PAGE_GET_PAGEID(&Page) == UINT32_C(0x12345678));
+ CHECK_EXPR(PGM_PAGE_GET_TYPE_NA(&Page) == PGMPAGETYPE_RAM);
+ CHECK_EXPR(PGM_PAGE_GET_STATE_NA(&Page) == PGM_PAGE_STATE_ALLOCATED);
+
+
+ /*
+ * Report result.
+ */
+ if (rc)
+ printf("tstVMStructSize: FAILURE - %d errors\n", rc);
+ else
+ printf("tstVMStructSize: SUCCESS\n");
+ return rc;
+}
+
diff --git a/src/VBox/VMM/testcase/tstX86-1.cpp b/src/VBox/VMM/testcase/tstX86-1.cpp
new file mode 100644
index 00000000..8e38e437
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstX86-1.cpp
@@ -0,0 +1,270 @@
+/* $Id: tstX86-1.cpp $ */
+/** @file
+ * X86 instruction set exploration/testcase #1.
+ */
+
+/*
+ * Copyright (C) 2011-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <iprt/test.h>
+#include <iprt/param.h>
+#include <iprt/mem.h>
+#include <iprt/errcore.h>
+#include <iprt/assert.h>
+#include <iprt/x86.h>
+
+#ifdef RT_OS_WINDOWS
+# include <iprt/win/windows.h>
+#else
+# ifdef RT_OS_DARWIN
+# define _XOPEN_SOURCE
+# endif
+# include <signal.h>
+# include <ucontext.h>
+# define USE_SIGNAL
+#endif
+
+
+/*********************************************************************************************************************************
+* Structures and Typedefs *
+*********************************************************************************************************************************/
+typedef struct TRAPINFO
+{
+ uintptr_t uTrapPC;
+ uintptr_t uResumePC;
+ uint8_t u8Trap;
+ uint8_t cbInstr;
+ uint8_t auAlignment[sizeof(uintptr_t) * 2 - 2];
+} TRAPINFO;
+typedef TRAPINFO const *PCTRAPINFO;
+
+
+/*********************************************************************************************************************************
+* Global Variables *
+*********************************************************************************************************************************/
+RT_C_DECLS_BEGIN
+uint8_t *g_pbEfPage = NULL;
+uint8_t *g_pbEfExecPage = NULL;
+extern TRAPINFO g_aTrapInfo[];
+RT_C_DECLS_END
+
+
+/*********************************************************************************************************************************
+* Internal Functions *
+*********************************************************************************************************************************/
+DECLASM(int32_t) x861_Test1(void);
+DECLASM(int32_t) x861_Test2(void);
+DECLASM(int32_t) x861_Test3(void);
+DECLASM(int32_t) x861_Test4(void);
+DECLASM(int32_t) x861_Test5(void);
+DECLASM(int32_t) x861_Test6(void);
+DECLASM(int32_t) x861_Test7(void);
+DECLASM(int32_t) x861_TestFPUInstr1(void);
+
+
+
+static PCTRAPINFO findTrapInfo(uintptr_t uTrapPC, uintptr_t uTrapSP)
+{
+ /* Search by trap program counter. */
+ for (unsigned i = 0; g_aTrapInfo[i].uTrapPC; i++) /* table is zero-terminated */
+ if (g_aTrapInfo[i].uTrapPC == uTrapPC)
+ return &g_aTrapInfo[i];
+
+ /* Search by return address. */
+ uintptr_t uReturn = *(uintptr_t *)uTrapSP; /* word at the trap-time stack top */
+ for (unsigned i = 0; g_aTrapInfo[i].uTrapPC; i++)
+ if (g_aTrapInfo[i].uTrapPC + g_aTrapInfo[i].cbInstr == uReturn)
+ return &g_aTrapInfo[i];
+
+ return NULL; /* unknown trap location - caller reports "Unexpected trap" */
+}
+
+#ifdef USE_SIGNAL
+static void sigHandler(int iSig, siginfo_t *pSigInfo, void *pvSigCtx)
+{
+ ucontext_t *pCtx = (ucontext_t *)pvSigCtx; /* per-OS/arch ucontext layouts handled below */
+ NOREF(pSigInfo);
+
+# if defined(RT_ARCH_AMD64) && defined(RT_OS_DARWIN)
+ uintptr_t *puPC = (uintptr_t *)&pCtx->uc_mcontext->__ss.__rip;
+ uintptr_t *puSP = (uintptr_t *)&pCtx->uc_mcontext->__ss.__rsp;
+ uintptr_t uTrapNo = pCtx->uc_mcontext->__es.__trapno;
+ uintptr_t uErr = pCtx->uc_mcontext->__es.__err;
+ uintptr_t uCr2 = pCtx->uc_mcontext->__es.__faultvaddr;
+
+# elif defined(RT_ARCH_AMD64) && defined(RT_OS_FREEBSD)
+ uintptr_t *puPC = (uintptr_t *)&pCtx->uc_mcontext.mc_rip;
+ uintptr_t *puSP = (uintptr_t *)&pCtx->uc_mcontext.mc_rsp;
+ uintptr_t uTrapNo = ~(uintptr_t)0; /* not exposed on this platform */
+ uintptr_t uErr = ~(uintptr_t)0;
+ uintptr_t uCr2 = ~(uintptr_t)0;
+
+# elif defined(RT_ARCH_AMD64)
+ uintptr_t *puPC = (uintptr_t *)&pCtx->uc_mcontext.gregs[REG_RIP];
+ uintptr_t *puSP = (uintptr_t *)&pCtx->uc_mcontext.gregs[REG_RSP];
+ uintptr_t uTrapNo = pCtx->uc_mcontext.gregs[REG_TRAPNO];
+ uintptr_t uErr = pCtx->uc_mcontext.gregs[REG_ERR];
+ uintptr_t uCr2 = pCtx->uc_mcontext.gregs[REG_CR2];
+
+# elif defined(RT_ARCH_X86) && defined(RT_OS_DARWIN)
+ uintptr_t *puPC = (uintptr_t *)&pCtx->uc_mcontext->__ss.__eip;
+ uintptr_t *puSP = (uintptr_t *)&pCtx->uc_mcontext->__ss.__esp;
+ uintptr_t uTrapNo = pCtx->uc_mcontext->__es.__trapno;
+ uintptr_t uErr = pCtx->uc_mcontext->__es.__err;
+ uintptr_t uCr2 = pCtx->uc_mcontext->__es.__faultvaddr;
+
+# elif defined(RT_ARCH_X86) && defined(RT_OS_FREEBSD)
+ uintptr_t *puPC = (uintptr_t *)&pCtx->uc_mcontext.mc_eip;
+ uintptr_t *puSP = (uintptr_t *)&pCtx->uc_mcontext.mc_esp;
+ uintptr_t uTrapNo = ~(uintptr_t)0;
+ uintptr_t uErr = ~(uintptr_t)0;
+ uintptr_t uCr2 = ~(uintptr_t)0;
+
+# elif defined(RT_ARCH_X86)
+ uintptr_t *puPC = (uintptr_t *)&pCtx->uc_mcontext.gregs[REG_EIP];
+ uintptr_t *puSP = (uintptr_t *)&pCtx->uc_mcontext.gregs[REG_ESP];
+ uintptr_t uTrapNo = pCtx->uc_mcontext.gregs[REG_TRAPNO];
+ uintptr_t uErr = pCtx->uc_mcontext.gregs[REG_ERR];
+# ifdef REG_CR2 /** @todo ... */
+ uintptr_t uCr2 = pCtx->uc_mcontext.gregs[REG_CR2];
+# else
+ uintptr_t uCr2 = ~(uintptr_t)0;
+# endif
+
+# else
+ uintptr_t *puPC = NULL; /* unsupported platform: will crash below on *puPC */
+ uintptr_t *puSP = NULL;
+ uintptr_t uTrapNo = ~(uintptr_t)0;
+ uintptr_t uErr = ~(uintptr_t)0;
+ uintptr_t uCr2 = ~(uintptr_t)0;
+# endif
+ if (uTrapNo == X86_XCPT_PF)
+ RTAssertMsg2("tstX86-1: Trap #%#04x err=%#06x at %p / %p\n", uTrapNo, uErr, *puPC, uCr2);
+ else
+ RTAssertMsg2("tstX86-1: Trap #%#04x err=%#06x at %p\n", uTrapNo, uErr, *puPC);
+
+ PCTRAPINFO pTrapInfo = findTrapInfo(*puPC, *puSP);
+ if (pTrapInfo)
+ {
+ if (pTrapInfo->u8Trap != uTrapNo && uTrapNo != ~(uintptr_t)0)
+ RTAssertMsg2("tstX86-1: Expected #%#04x, got #%#04x\n", pTrapInfo->u8Trap, uTrapNo);
+ else
+ {
+ if (*puPC != pTrapInfo->uTrapPC) /* matched via return address, not PC */
+ *puSP += sizeof(uintptr_t); /* so drop that return address from the stack */
+ *puPC = pTrapInfo->uResumePC; /* resume after the trapping instruction */
+ return;
+ }
+ }
+ else
+ RTAssertMsg2("tstX86-1: Unexpected trap!\n");
+
+ /* die */
+ signal(iSig, SIG_IGN); /* NOTE(review): restores to SIG_IGN and returns; relies on the re-raised fault ending the test - confirm */
+}
+#else
+
+#endif
+
+
+
+int main() /* entry point: sets up guard/exec pages and trap handlers, then runs the assembly test batteries */
+{
+ /*
+ * Set up the test environment: guarded data page, executable page with a
+ * guard page after it, and signal handlers for the expected traps.
+ */
+ RTTEST hTest;
+ RTEXITCODE rcExit = RTTestInitAndCreate("tstX86-1", &hTest);
+ if (rcExit != RTEXITCODE_SUCCESS)
+ return rcExit;
+ RTTestBanner(hTest);
+
+ g_pbEfPage = (uint8_t *)RTTestGuardedAllocTail(hTest, PAGE_SIZE);
+ RTTESTI_CHECK(g_pbEfPage != NULL);
+
+ g_pbEfExecPage = (uint8_t *)RTMemExecAlloc(PAGE_SIZE*2); /* 2nd page becomes the guard */
+ RTTESTI_CHECK(g_pbEfExecPage != NULL);
+ RTTESTI_CHECK(!((uintptr_t)g_pbEfExecPage & PAGE_OFFSET_MASK));
+ RTTESTI_CHECK_RC(RTMemProtect(g_pbEfExecPage + PAGE_SIZE, PAGE_SIZE, RTMEM_PROT_NONE), VINF_SUCCESS);
+
+#ifdef USE_SIGNAL
+ static int const s_aiSigs[] = { SIGBUS, SIGSEGV, SIGFPE, SIGILL };
+ for (unsigned i = 0; i < RT_ELEMENTS(s_aiSigs); i++)
+ {
+ struct sigaction SigAct;
+ RTTESTI_CHECK_BREAK(sigaction(s_aiSigs[i], NULL, &SigAct) == 0); /* start from current disposition */
+ SigAct.sa_sigaction = sigHandler;
+ SigAct.sa_flags |= SA_SIGINFO;
+ RTTESTI_CHECK(sigaction(s_aiSigs[i], &SigAct, NULL) == 0);
+ }
+#else
+ /** @todo implement me. */
+#endif
+
+
+ if (!RTTestErrorCount(hTest))
+ {
+ /*
+ * Do the testing. The //#if 0 / //#endif markers are toggled during
+ * development to select which sub-tests are compiled in.
+ */
+ int32_t rc;
+#if 0
+ RTTestSub(hTest, "Misc 1");
+ rc = x861_Test1();
+ if (rc != 0)
+ RTTestFailed(hTest, "x861_Test1 -> %d", rc);
+
+ RTTestSub(hTest, "Prefixes and groups");
+ rc = x861_Test2();
+ if (rc != 0)
+ RTTestFailed(hTest, "x861_Test2 -> %d", rc);
+
+ RTTestSub(hTest, "fxsave / fxrstor and #PFs");
+ rc = x861_Test3();
+ if (rc != 0)
+ RTTestFailed(hTest, "x861_Test3 -> %d", rc);
+
+ RTTestSub(hTest, "Multibyte NOPs");
+ rc = x861_Test4();
+ if (rc != 0)
+ RTTestFailed(hTest, "x861_Test4 -> %d", rc);
+//#endif
+
+ RTTestSub(hTest, "Odd encodings and odd ends");
+ rc = x861_Test5();
+ if (rc != 0)
+ RTTestFailed(hTest, "x861_Test5 -> %d", rc);
+
+//#if 0
+ RTTestSub(hTest, "Odd floating point encodings");
+ rc = x861_Test6();
+ if (rc != 0)
+ RTTestFailed(hTest, "x861_Test6 -> %d", rc); /* fixed: previously reported as x861_Test5 */
+
+ RTTestSub(hTest, "Floating point exceptions ++");
+ rc = x861_Test7();
+ if (rc != 0)
+ RTTestFailed(hTest, "x861_Test7 -> %d", rc); /* fixed: previously reported as x861_Test6 */
+#endif
+
+ rc = x861_TestFPUInstr1();
+ if (rc != 0)
+ RTTestFailed(hTest, "x861_TestFPUInstr1 -> %d", rc);
+ }
+
+ return RTTestSummaryAndDestroy(hTest);
+}
+
diff --git a/src/VBox/VMM/testcase/tstX86-1A.asm b/src/VBox/VMM/testcase/tstX86-1A.asm
new file mode 100644
index 00000000..d2b74a06
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstX86-1A.asm
@@ -0,0 +1,3443 @@
+; $Id: tstX86-1A.asm $
+;; @file
+; X86 instruction set exploration/testcase #1.
+;
+
+;
+; Copyright (C) 2011-2020 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Header Files ;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+%include "iprt/asmdefs.mac"
+%include "iprt/x86.mac"
+
+;; @todo Move this to a header?
+struc TRAPINFO
+ .uTrapPC RTCCPTR_RES 1
+ .uResumePC RTCCPTR_RES 1
+ .u8TrapNo resb 1
+ .cbInstr resb 1
+ .au8Padding resb (RTCCPTR_CB*2 - 2)
+endstruc
+
+
+%ifdef RT_ARCH_AMD64
+ %define arch_fxsave o64 fxsave
+ %define arch_fxrstor o64 fxrstor
+%else
+ %define arch_fxsave fxsave
+ %define arch_fxrstor fxrstor
+%endif
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Global Variables ;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+BEGINDATA
+extern NAME(g_pbEfPage)
+extern NAME(g_pbEfExecPage)
+
+GLOBALNAME g_szAlpha
+ db "abcdefghijklmnopqrstuvwxyz", 0
+g_szAlpha_end:
+%define g_cchAlpha (g_szAlpha_end - NAME(g_szAlpha)) ; 27 = 26 letters + NUL
+ db 0, 0, 0 ; padding (stray trailing comma removed)
+
+;; @name Floating point constants.
+; @{
+g_r32_0dot1: dd 0.1
+g_r32_3dot2: dd 3.2
+g_r32_Zero: dd 0.0
+g_r32_One: dd 1.0
+g_r32_Two: dd 2.0
+g_r32_Three: dd 3.0
+g_r32_Ten: dd 10.0
+g_r32_Eleven: dd 11.0
+g_r32_ThirtyTwo:dd 32.0
+g_r32_Min: dd 000800000h
+g_r32_Max: dd 07f7fffffh
+g_r32_Inf: dd 07f800000h
+g_r32_SNaN: dd 07f800001h
+g_r32_SNaNMax: dd 07fbfffffh
+g_r32_QNaN: dd 07fc00000h
+g_r32_QNaNMax: dd 07fffffffh
+g_r32_NegQNaN: dd 0ffc00000h
+
+g_r64_0dot1: dq 0.1
+g_r64_6dot9: dq 6.9
+g_r64_Zero: dq 0.0
+g_r64_One: dq 1.0
+g_r64_Two: dq 2.0
+g_r64_Three: dq 3.0
+g_r64_Ten: dq 10.0
+g_r64_Eleven: dq 11.0
+g_r64_ThirtyTwo:dq 32.0
+g_r64_Min: dq 00010000000000000h
+g_r64_Max: dq 07fefffffffffffffh
+g_r64_Inf: dq 07ff0000000000000h
+g_r64_SNaN: dq 07ff0000000000001h
+g_r64_SNaNMax: dq 07ff7ffffffffffffh
+g_r64_NegQNaN: dq 0fff8000000000000h
+g_r64_QNaN: dq 07ff8000000000000h
+g_r64_QNaNMax: dq 07fffffffffffffffh
+g_r64_DnMin: dq 00000000000000001h
+g_r64_DnMax: dq 0000fffffffffffffh
+
+
+g_r80_0dot1: dt 0.1
+g_r80_3dot2: dt 3.2
+g_r80_Zero: dt 0.0
+g_r80_One: dt 1.0
+g_r80_Two: dt 2.0
+g_r80_Three: dt 3.0
+g_r80_Ten: dt 10.0
+g_r80_Eleven: dt 11.0
+g_r80_ThirtyTwo:dt 32.0
+g_r80_Min: dt 000018000000000000000h
+g_r80_Max: dt 07ffeffffffffffffffffh
+g_r80_Inf: dt 07fff8000000000000000h
+g_r80_QNaN: dt 07fffc000000000000000h
+g_r80_QNaNMax: dt 07fffffffffffffffffffh
+g_r80_NegQNaN: dt 0ffffc000000000000000h
+g_r80_SNaN: dt 07fff8000000000000001h
+g_r80_SNaNMax: dt 07fffbfffffffffffffffh
+g_r80_DnMin: dt 000000000000000000001h
+g_r80_DnMax: dt 000007fffffffffffffffh
+
+g_r32V1: dd 3.2
+g_r32V2: dd -1.9
+g_r64V1: dq 6.4
+g_r80V1: dt 8.0
+
+; Denormal numbers.
+g_r32D0: dd 000200000h
+;; @}
+
+;; @name Upconverted Floating point constants
+; @{
+;g_r80_r32_0dot1: dt 0.1
+g_r80_r32_3dot2: dt 04000cccccd0000000000h
+;g_r80_r32_Zero: dt 0.0
+;g_r80_r32_One: dt 1.0
+;g_r80_r32_Two: dt 2.0
+;g_r80_r32_Three: dt 3.0
+;g_r80_r32_Ten: dt 10.0
+;g_r80_r32_Eleven: dt 11.0
+;g_r80_r32_ThirtyTwo: dt 32.0
+;; @}
+
+;; @name Decimal constants.
+; @{
+g_u64Zero: dq 0 ; fixed: was 'dd' (32-bit) despite the u64 name
+g_u32Zero: dd 0 ; fixed: was 'dw' (16-bit) despite the u32 name
+g_u64Two: dq 2 ; fixed: was 'dd'; a qword load here previously overlapped g_u32Two
+g_u32Two: dd 2 ; fixed: was 'dw'
+;; @}
+
+
+;;
+; The last global data item. We build this as we write the code.
+ align 8
+GLOBALNAME g_aTrapInfo
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Defined Constants And Macros ;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Reference a variable
+%ifdef RT_ARCH_AMD64
+ %define REF(a_Name) a_Name wrt rip
+%else
+ %define REF(a_Name) a_Name
+%endif
+
+;; Reference a global variable
+%ifdef RT_ARCH_AMD64
+ %define REF_EXTERN(a_Name) NAME(a_Name) wrt rip
+%else
+ %define REF_EXTERN(a_Name) NAME(a_Name)
+%endif
+
+
+;;
+; Macro for checking a memory value.
+;
+; @param 1 The size (byte, word, dword, etc)
+; @param 2 The memory address expression.
+; @param 3 The valued expected at the location.
+%macro CheckMemoryValue 3
+ cmp %1 [%2], %3
+ je %%ok
+ mov eax, __LINE__ ; failure: return the source line number as the error code
+ jmp .return ; jumps to the .return label of the enclosing function
+%%ok:
+%endmacro
+
+
+;;
+; Checks if a 32-bit floating point memory value is the same as the specified
+; constant (also memory).
+;
+; @uses eax
+; @param 1 Address expression for the 32-bit floating point value
+; to be checked.
+; @param 2 The address expression of the constant.
+;
+%macro CheckMemoryR32ValueConst 2
+ mov eax, [%2]
+ cmp dword [%1], eax
+ je %%ok
+%%bad:
+ mov eax, 90000000 + __LINE__
+ jmp .return
+%%ok:
+%endmacro
+
+
+;;
+; Checks if a 80-bit floating point memory value is the same as the specified
+; constant (also memory).
+;
+; @uses eax
+; @param 1 Address expression for the 80-bit floating point value
+; to be checked. (Doc fix: previously said "FXSAVE image".)
+; @param 2 The address expression of the constant.
+;
+%macro CheckMemoryR80ValueConst 2
+ mov eax, [%2]
+ cmp dword [%1], eax ; mantissa bits 0..31
+ je %%ok1
+%%bad:
+ mov eax, 92000000 + __LINE__ ; 92xxxxxx range marks R80 value-check failures
+ jmp .return
+%%ok1:
+ mov eax, [4 + %2]
+ cmp dword [%1 + 4], eax ; mantissa bits 32..63
+ jne %%bad
+ mov ax, [8 + %2]
+ cmp word [%1 + 8], ax ; sign + exponent word
+ jne %%bad
+%endmacro
+
+
+;;
+; Macro for recording a trapping instruction (simple).
+;
+; @param 1 The trap number.
+; @param 2+ The instruction which should trap.
+%macro ShouldTrap 2+
+%%trap:
+ %2
+%%trap_end:
+ mov eax, __LINE__ ; only reached if the instruction did NOT trap
+ jmp .return
+BEGINDATA
+%%trapinfo: istruc TRAPINFO ; emitted into the data section; picked up via g_aTrapInfo by the handler
+ at TRAPINFO.uTrapPC, RTCCPTR_DEF %%trap
+ at TRAPINFO.uResumePC, RTCCPTR_DEF %%resume
+ at TRAPINFO.u8TrapNo, db %1
+ at TRAPINFO.cbInstr, db (%%trap_end - %%trap)
+iend
+BEGINCODE
+%%resume: ; the signal handler redirects execution here
+%endmacro
+
+;;
+; Macro for recording a trapping instruction in the exec page.
+;
+; @uses xAX, xDX
+; @param 1 The trap number.
+; @param 2 The offset into the exec page.
+%macro ShouldTrapExecPage 2
+ lea xDX, [REF(NAME(g_aTrapInfoExecPage))]
+ lea xAX, [REF(%%resume)]
+ mov byte [xDX + TRAPINFO.cbInstr], PAGE_SIZE - (%2) ; instruction spans from the offset to the page end
+ mov byte [xDX + TRAPINFO.u8TrapNo], %1
+ mov [xDX + TRAPINFO.uResumePC], xAX
+ mov xAX, [REF_EXTERN(g_pbEfExecPage)] ; exec page address is only known at runtime,
+ lea xAX, [xAX + (%2)] ; so the trap info record is filled in here
+ mov [xDX + TRAPINFO.uTrapPC], xAX
+ jmp xAX ; run the instruction placed in the exec page
+%%resume:
+%endmacro
+
+
+;;
+; Macro for recording a FPU instruction trapping on a following fwait.
+;
+; Uses stack.
+;
+; @param 1 The status flags that are expected to be set afterwards.
+; @param 2 C0..C3 to mask out in case undefined.
+; @param 3+ The instruction which should trap.
+; @uses eax
+;
+%macro FpuShouldTrap 3+
+ fnclex ; clear any pending FPU exceptions first
+ %3
+%%trap:
+ fwait ; the deferred #MF is delivered here, not at the instruction itself
+%%trap_end:
+ mov eax, __LINE__ ; only reached if no trap was raised
+ jmp .return
+BEGINDATA
+%%trapinfo: istruc TRAPINFO
+ at TRAPINFO.uTrapPC, RTCCPTR_DEF %%trap
+ at TRAPINFO.uResumePC, RTCCPTR_DEF %%resume
+ at TRAPINFO.u8TrapNo, db X86_XCPT_MF
+ at TRAPINFO.cbInstr, db (%%trap_end - %%trap)
+iend
+BEGINCODE
+%%resume:
+ FpuCheckFSW ((%1) | X86_FSW_ES | X86_FSW_B), %2 ; ES+B are expected set after an unmasked exception
+ fnclex
+%endmacro
+
+;;
+; Macro for recording checking the FSW value.
+;
+; Uses stack.
+;
+; The TOP field and the caller-specified undefined condition bits are masked
+; out before comparing.  On mismatch the error code returned via .return is
+; the masked FSW plus __LINE__ * 100000.
+;
+; @param 1 The status flags that are expected to be set afterwards.
+; @param 2 C0..C3 to mask out in case undefined.
+; @uses eax
+;
+%macro FpuCheckFSW 2
+%%resume:
+ fnstsw ax
+ and eax, ~X86_FSW_TOP_MASK & ~(%2)
+ cmp eax, (%1)
+ je %%ok
+ ;int3
+ lea eax, [eax + __LINE__ * 100000]
+ jmp .return
+%%ok:
+%endmacro
+
+
+;;
+; Checks that ST0 has a certain value
+;
+; The register is spilled as a tword to [xSP] and reloaded (preserving ST0),
+; then compared piecewise: low dword, high dword, and the sign+exponent word.
+; On mismatch the enclosing function returns __LINE__.
+;
+; @param 1 Low dword of the expected 80-bit value.
+; @param 2 High dword of the mantissa.
+; @param 3 Sign and exponent word.
+; @uses tword at [xSP]
+;
+%macro CheckSt0Value 3
+ fstp tword [xSP]
+ fld tword [xSP]
+ cmp dword [xSP], %1
+ je %%ok1
+%%bad:
+ mov eax, __LINE__
+ jmp .return
+%%ok1:
+ cmp dword [xSP + 4], %2
+ jne %%bad
+ cmp word [xSP + 8], %3
+ jne %%bad
+%endmacro
+
+;; Checks that ST0 contains QNaN.
+%define CheckSt0Value_QNaN CheckSt0Value 0x00000000, 0xc0000000, 0xffff
+;; Checks that ST0 contains +Inf.
+%define CheckSt0Value_PlusInf CheckSt0Value 0x00000000, 0x80000000, 0x7fff
+;; Checks that ST0 contains 3 & 1/3.
+%define CheckSt0Value_3_and_a_3rd CheckSt0Value 0x55555555, 0xd5555555, 0x4000
+;; Checks that ST0 contains 3 & 2/3.
+%define CheckSt0Value_3_and_two_3rds CheckSt0Value 0xaaaaaaab, 0xeaaaaaaa, 0x4000
+;; Checks that ST0 contains 8.0.
+%define CheckSt0Value_Eight CheckSt0Value 0x00000000, 0x80000000, 0x4002
+
+
+;;
+; Macro for recording checking the FSW value of a FXSAVE image.
+;
+; Uses stack.
+;
+; Same check as FpuCheckFSW but reads the FSW from an FXSAVE image instead of
+; the live FPU; fails with eax = 100000000 + __LINE__ via .return.
+;
+; @param 1 Address expression for the FXSAVE image.
+; @param 2 The status flags that are expected to be set afterwards.
+; @param 3 C0..C3 to mask out in case undefined.
+; @uses eax
+; @sa FpuCheckFSW
+;
+%macro FxSaveCheckFSW 3
+%%resume:
+ movzx eax, word [%1 + X86FXSTATE.FSW]
+ and eax, ~X86_FSW_TOP_MASK & ~(%3)
+ cmp eax, (%2)
+ je %%ok
+ mov eax, 100000000 + __LINE__
+ jmp .return
+%%ok:
+%endmacro
+
+
+;;
+; Checks that ST0 is empty in an FXSAVE image.
+;
+; Extracts TOP from the saved FSW and tests the corresponding bit in the
+; (abridged) FTW tag byte: a clear bit means the register is empty.  Fails
+; with eax = 200000000 + __LINE__ via .return when the bit is set.
+;
+; @uses eax
+; @param 1 Address expression for the FXSAVE image.
+;
+%macro FxSaveCheckSt0Empty 1
+ movzx eax, word [%1 + X86FXSTATE.FSW]
+ and eax, X86_FSW_TOP_MASK
+ shr eax, X86_FSW_TOP_SHIFT
+ bt [%1 + X86FXSTATE.FTW], eax
+ jnc %%ok
+ mov eax, 200000000 + __LINE__
+ jmp .return
+%%ok:
+%endmacro
+
+
+;;
+; Checks that ST0 is not-empty in an FXSAVE image.
+;
+; Inverse of FxSaveCheckSt0Empty: the FTW bit for TOP must be set.
+; NOTE(review): the 30000000 error-code base is shared with
+; FxSaveCheckStNNonEmpty, so failures of the two are distinguished only by
+; the embedded line number.
+;
+; @uses eax
+; @param 1 Address expression for the FXSAVE image.
+;
+%macro FxSaveCheckSt0NonEmpty 1
+ movzx eax, word [%1 + X86FXSTATE.FSW]
+ and eax, X86_FSW_TOP_MASK
+ shr eax, X86_FSW_TOP_SHIFT
+ bt [%1 + X86FXSTATE.FTW], eax
+ jc %%ok
+ mov eax, 30000000 + __LINE__
+ jmp .return
+%%ok:
+%endmacro
+
+;;
+; Checks that STn in a FXSAVE image has a certain value (empty or not
+; is ignored).
+;
+; Each register slot in the FXSAVE image is 16 bytes; the 80-bit value is
+; compared as two dwords and a word.  Fails with eax = 40000000 + __LINE__.
+;
+; @uses eax
+; @param 1 Address expression for the FXSAVE image.
+; @param 2 The register number.
+; @param 3 First dword of value.
+; @param 4 Second dword of value.
+; @param 5 Final word of value.
+;
+%macro FxSaveCheckStNValueEx 5
+ cmp dword [%1 + X86FXSTATE.st0 + %2 * 16], %3
+ je %%ok1
+%%bad:
+ mov eax, 40000000 + __LINE__
+ jmp .return
+%%ok1:
+ cmp dword [%1 + X86FXSTATE.st0 + %2 * 16 + 4], %4
+ jne %%bad
+ cmp word [%1 + X86FXSTATE.st0 + %2 * 16 + 8], %5
+ jne %%bad
+%endmacro
+
+
+;;
+; Checks if STn in a FXSAVE image has the same value as the specified
+; floating point (80-bit) constant.
+;
+; Same layout assumptions as FxSaveCheckStNValueEx, but the expected value is
+; loaded from memory instead of being immediate.  Fails with
+; eax = 40000000 + __LINE__ (same base as FxSaveCheckStNValueEx).
+;
+; @uses eax (only eax/ax is touched by this macro body)
+; @param 1 Address expression for the FXSAVE image.
+; @param 2 The register number.
+; @param 3 The address expression of the constant.
+;
+%macro FxSaveCheckStNValueConstEx 3
+ mov eax, [%3]
+ cmp dword [%1 + X86FXSTATE.st0 + %2 * 16], eax
+ je %%ok1
+%%bad:
+ mov eax, 40000000 + __LINE__
+ jmp .return
+%%ok1:
+ mov eax, [4 + %3]
+ cmp dword [%1 + X86FXSTATE.st0 + %2 * 16 + 4], eax
+ jne %%bad
+ mov ax, [8 + %3]
+ cmp word [%1 + X86FXSTATE.st0 + %2 * 16 + 8], ax
+ jne %%bad
+%endmacro
+
+
+;;
+; Checks that ST0 in a FXSAVE image has a certain value.
+;
+; Combination check: the register must be marked non-empty AND contain the
+; given 80-bit value.
+;
+; @uses eax
+; @param 1 Address expression for the FXSAVE image.
+; @param 2 First dword of value.
+; @param 3 Second dword of value.
+; @param 4 Final word of value.
+;
+%macro FxSaveCheckSt0Value 4
+ FxSaveCheckSt0NonEmpty %1
+ FxSaveCheckStNValueEx %1, 0, %2, %3, %4
+%endmacro
+
+
+;;
+; Checks that ST0 in a FXSAVE image is empty and that the value stored is the
+; init value set by FpuInitWithCW.
+;
+; The 0x40404040... pattern is the fill value the init helper stores; an empty
+; tag with that residue proves the register was initialized but never loaded.
+;
+; @uses eax
+; @param 1 Address expression for the FXSAVE image.
+;
+%macro FxSaveCheckSt0EmptyInitValue 1
+ FxSaveCheckSt0Empty %1
+ FxSaveCheckStNValueEx %1, 0, 0x40404040, 0x40404040, 0xffff
+%endmacro
+
+;;
+; Checks that ST0 in a FXSAVE image is non-empty and has the same value as the
+; specified constant (80-bit).
+;
+; @uses eax (the expanded sub-macros only touch eax/ax)
+; @param 1 Address expression for the FXSAVE image.
+; @param 2 The address expression of the constant.
+%macro FxSaveCheckSt0ValueConst 2
+ FxSaveCheckSt0NonEmpty %1
+ FxSaveCheckStNValueConstEx %1, 0, %2
+%endmacro
+
+;; Checks that ST0 contains QNaN.
+%define FxSaveCheckSt0Value_QNaN(p) FxSaveCheckSt0Value p, 0x00000000, 0xc0000000, 0xffff
+;; Checks that ST0 contains +Inf.
+%define FxSaveCheckSt0Value_PlusInf(p) FxSaveCheckSt0Value p, 0x00000000, 0x80000000, 0x7fff
+;; Checks that ST0 contains 3 & 1/3.
+%define FxSaveCheckSt0Value_3_and_a_3rd(p) FxSaveCheckSt0Value p, 0x55555555, 0xd5555555, 0x4000
+;; Checks that ST0 contains 3 & 2/3.
+%define FxSaveCheckSt0Value_3_and_two_3rds(p) FxSaveCheckSt0Value p, 0xaaaaaaab, 0xeaaaaaaa, 0x4000
+
+
+
+;;
+; Checks that STn is empty in an FXSAVE image.
+;
+; Computes the physical register index as (TOP + n) masked to 3 bits, then
+; tests the corresponding FTW bit; clear = empty.  Fails with
+; eax = 20000000 + __LINE__ via .return.
+;
+; @uses eax
+; @param 1 Address expression for the FXSAVE image.
+; @param 2 The register number.
+;
+%macro FxSaveCheckStNEmpty 2
+ movzx eax, word [%1 + X86FXSTATE.FSW]
+ and eax, X86_FSW_TOP_MASK
+ shr eax, X86_FSW_TOP_SHIFT
+ add eax, %2
+ and eax, X86_FSW_TOP_SMASK
+ bt [%1 + X86FXSTATE.FTW], eax
+ jnc %%ok
+ mov eax, 20000000 + __LINE__
+ jmp .return
+%%ok:
+%endmacro
+
+
+;;
+; Checks that STn is not-empty in an FXSAVE image.
+;
+; Inverse of FxSaveCheckStNEmpty: the FTW bit for (TOP + n) mod 8 must be
+; set.  Fails with eax = 30000000 + __LINE__ via .return.
+;
+; @uses eax
+; @param 1 Address expression for the FXSAVE image.
+; @param 2 The register number.
+;
+%macro FxSaveCheckStNNonEmpty 2
+ movzx eax, word [%1 + X86FXSTATE.FSW]
+ and eax, X86_FSW_TOP_MASK
+ shr eax, X86_FSW_TOP_SHIFT
+ add eax, %2
+ and eax, X86_FSW_TOP_SMASK
+ bt [%1 + X86FXSTATE.FTW], eax
+ jc %%ok
+ mov eax, 30000000 + __LINE__
+ jmp .return
+%%ok:
+%endmacro
+
+
+;;
+; Checks that STn in a FXSAVE image has a certain value.
+;
+; Combination check: non-empty tag plus exact 80-bit value.
+;
+; @uses eax
+; @param 1 Address expression for the FXSAVE image.
+; @param 2 The register number.
+; @param 3 First dword of value.
+; @param 4 Second dword of value.
+; @param 5 Final word of value.
+;
+%macro FxSaveCheckStNValue 5
+ FxSaveCheckStNNonEmpty %1, %2
+ FxSaveCheckStNValueEx %1, %2, %3, %4, %5
+%endmacro
+
+;;
+; Checks that STn in a FXSAVE image is non-empty and has the same value as the
+; specified constant (80-bit).
+;
+; @uses eax (the expanded sub-macros only touch eax/ax)
+; @param 1 Address expression for the FXSAVE image.
+; @param 2 The register number.
+; @param 3 The address expression of the constant.
+%macro FxSaveCheckStNValueConst 3
+ FxSaveCheckStNNonEmpty %1, %2
+ FxSaveCheckStNValueConstEx %1, %2, %3
+%endmacro
+
+;; Checks that STn contains QNaN.
+%define FxSaveCheckStNValue_QNaN(p, iSt) FxSaveCheckStNValue p, iSt, 0x00000000, 0xc0000000, 0xffff
+;; Checks that STn contains +Inf.
+%define FxSaveCheckStNValue_PlusInf(p, iSt) FxSaveCheckStNValue p, iSt, 0x00000000, 0x80000000, 0x7fff
+;; Checks that STn contains 3 & 1/3.
+%define FxSaveCheckStNValue_3_and_a_3rd(p, iSt) FxSaveCheckStNValue p, iSt, 0x55555555, 0xd5555555, 0x4000
+;; Checks that STn contains 3 & 2/3.
+%define FxSaveCheckStNValue_3_and_two_3rds(p, iSt) FxSaveCheckStNValue p, iSt, 0xaaaaaaab, 0xeaaaaaaa, 0x4000
+
+
+;;
+; Function prologue saving all registers except EAX and aligns the stack
+; on a 16-byte boundary.
+;
+; Pairs with SAVE_ALL_EPILOGUE, which recovers the pre-alignment stack
+; pointer from xBP, so the 'and xSP' below is safe.
+;
+%macro SAVE_ALL_PROLOGUE 0
+ push xBP
+ mov xBP, xSP
+ pushf
+ push xBX
+ push xCX
+ push xDX
+ push xSI
+ push xDI
+%ifdef RT_ARCH_AMD64
+ push r8
+ push r9
+ push r10
+ push r11
+ push r12
+ push r13
+ push r14
+ push r15
+%endif
+ and xSP, ~0fh;
+%endmacro
+
+
+;;
+; Function epilogue restoring all registers except EAX.
+;
+; The lea re-derives the saved-register area from xBP (14 slots on AMD64:
+; flags + 5 GPRs + r8-r15; 6 slots on x86), undoing the 16-byte alignment
+; done by SAVE_ALL_PROLOGUE before popping.
+;
+%macro SAVE_ALL_EPILOGUE 0
+%ifdef RT_ARCH_AMD64
+ lea rsp, [rbp - 14 * 8]
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+ pop r11
+ pop r10
+ pop r9
+ pop r8
+%else
+ lea esp, [ebp - 6 * 4]
+%endif
+ pop xDI
+ pop xSI
+ pop xDX
+ pop xCX
+ pop xBX
+ popf
+ leave
+%endmacro
+
+
+
+
+BEGINCODE
+
+;;
+; Loads all general registers except xBP and xSP with unique values.
+;
+; Each register gets a repeated-nibble pattern matching its encoding number
+; (rcx=1111.., rdx=2222.., ...); 4444/5555 are skipped since rsp/rbp are
+; left untouched.
+;
+x861_LoadUniqueRegValues:
+%ifdef RT_ARCH_AMD64
+ mov rax, 00000000000000000h
+ mov rcx, 01111111111111111h
+ mov rdx, 02222222222222222h
+ mov rbx, 03333333333333333h
+ mov rsi, 06666666666666666h
+ mov rdi, 07777777777777777h
+ mov r8, 08888888888888888h
+ mov r9, 09999999999999999h
+ mov r10, 0aaaaaaaaaaaaaaaah
+ mov r11, 0bbbbbbbbbbbbbbbbh
+ mov r12, 0cccccccccccccccch
+ mov r13, 0ddddddddddddddddh
+ mov r14, 0eeeeeeeeeeeeeeeeh
+ mov r15, 0ffffffffffffffffh
+%else
+ mov eax, 000000000h
+ mov ecx, 011111111h
+ mov edx, 022222222h
+ mov ebx, 033333333h
+ mov esi, 066666666h
+ mov edi, 077777777h
+%endif
+ ret
+; end x861_LoadUniqueRegValues
+
+
+;;
+; Clears all general registers except xBP and xSP.
+;
+; On AMD64 the 32-bit xors already zero-extend to the full 64-bit registers.
+;
+x861_ClearRegisters:
+ xor eax, eax
+ xor ebx, ebx
+ xor ecx, ecx
+ xor edx, edx
+ xor esi, esi
+ xor edi, edi
+%ifdef RT_ARCH_AMD64
+ xor r8, r8
+ xor r9, r9
+ xor r10, r10
+ xor r11, r11
+ xor r12, r12
+ xor r13, r13
+ xor r14, r14
+ xor r15, r15
+%endif
+ ret
+; x861_ClearRegisters
+
+
+;;
+; Loads all MMX and SSE registers except xBP and xSP with unique values.
+;
+; Resets the x87 state first (fninit), then fills mmN with the byte pattern
+; 0x40+N and xmmN with 0x80+N; the constants live inline after the ret.
+;
+x861_LoadUniqueRegValuesSSE:
+ fninit
+ movq mm0, [REF(._mm0)]
+ movq mm1, [REF(._mm1)]
+ movq mm2, [REF(._mm2)]
+ movq mm3, [REF(._mm3)]
+ movq mm4, [REF(._mm4)]
+ movq mm5, [REF(._mm5)]
+ movq mm6, [REF(._mm6)]
+ movq mm7, [REF(._mm7)]
+ movdqu xmm0, [REF(._xmm0)]
+ movdqu xmm1, [REF(._xmm1)]
+ movdqu xmm2, [REF(._xmm2)]
+ movdqu xmm3, [REF(._xmm3)]
+ movdqu xmm4, [REF(._xmm4)]
+ movdqu xmm5, [REF(._xmm5)]
+ movdqu xmm6, [REF(._xmm6)]
+ movdqu xmm7, [REF(._xmm7)]
+%ifdef RT_ARCH_AMD64
+ movdqu xmm8, [REF(._xmm8)]
+ movdqu xmm9, [REF(._xmm9)]
+ movdqu xmm10, [REF(._xmm10)]
+ movdqu xmm11, [REF(._xmm11)]
+ movdqu xmm12, [REF(._xmm12)]
+ movdqu xmm13, [REF(._xmm13)]
+ movdqu xmm14, [REF(._xmm14)]
+ movdqu xmm15, [REF(._xmm15)]
+%endif
+ ret
+; Constant data for the loads above (never executed).
+._mm0: times 8 db 040h
+._mm1: times 8 db 041h
+._mm2: times 8 db 042h
+._mm3: times 8 db 043h
+._mm4: times 8 db 044h
+._mm5: times 8 db 045h
+._mm6: times 8 db 046h
+._mm7: times 8 db 047h
+._xmm0: times 16 db 080h
+._xmm1: times 16 db 081h
+._xmm2: times 16 db 082h
+._xmm3: times 16 db 083h
+._xmm4: times 16 db 084h
+._xmm5: times 16 db 085h
+._xmm6: times 16 db 086h
+._xmm7: times 16 db 087h
+%ifdef RT_ARCH_AMD64
+._xmm8: times 16 db 088h
+._xmm9: times 16 db 089h
+._xmm10: times 16 db 08ah
+._xmm11: times 16 db 08bh
+._xmm12: times 16 db 08ch
+._xmm13: times 16 db 08dh
+._xmm14: times 16 db 08eh
+._xmm15: times 16 db 08fh
+%endif
+; end x861_LoadUniqueRegValuesSSE
+
+
+;;
+; Clears all MMX and SSE registers.
+;
+; Resets the x87 state first (fninit), then loads every mmN/xmmN register
+; from the 16-byte zero constant that follows the ret.  General registers
+; are left untouched.
+;
+x861_ClearRegistersSSE:
+ fninit
+ movq mm0, [REF(.zero)]
+ movq mm1, [REF(.zero)]
+ movq mm2, [REF(.zero)]
+ movq mm3, [REF(.zero)]
+ movq mm4, [REF(.zero)]
+ movq mm5, [REF(.zero)]
+ movq mm6, [REF(.zero)]
+ movq mm7, [REF(.zero)]
+ movdqu xmm0, [REF(.zero)]
+ movdqu xmm1, [REF(.zero)]
+ movdqu xmm2, [REF(.zero)]
+ movdqu xmm3, [REF(.zero)]
+ movdqu xmm4, [REF(.zero)]
+ movdqu xmm5, [REF(.zero)]
+ movdqu xmm6, [REF(.zero)]
+ movdqu xmm7, [REF(.zero)]
+%ifdef RT_ARCH_AMD64
+ movdqu xmm8, [REF(.zero)]
+ movdqu xmm9, [REF(.zero)]
+ movdqu xmm10, [REF(.zero)]
+ movdqu xmm11, [REF(.zero)]
+ movdqu xmm12, [REF(.zero)]
+ movdqu xmm13, [REF(.zero)]
+ movdqu xmm14, [REF(.zero)]
+ movdqu xmm15, [REF(.zero)]
+%endif
+ ret
+; Note: a duplicated, unreachable 'ret' that followed the one above has been
+; removed; it was dead code sitting between the return and the data below.
+.zero times 16 db 000h
+; x861_ClearRegistersSSE
+
+
+;;
+; Loads all general, MMX and SSE registers except xBP and xSP with unique values.
+;
+; Note: SSE first, then GPRs, so eax ends up with the GPR pattern (zero).
+;
+x861_LoadUniqueRegValuesSSEAndGRegs:
+ call x861_LoadUniqueRegValuesSSE
+ call x861_LoadUniqueRegValues
+ ret
+
+;;
+; Clears all general, MMX and SSE registers except xBP and xSP.
+;
+x861_ClearRegistersSSEAndGRegs:
+ call x861_ClearRegistersSSE
+ call x861_ClearRegisters
+ ret
+
+;;
+; Tests misc instruction behaviour: push of segment registers, REX prefix
+; handling on push/pop, segreg move sizes, duplicated string prefixes,
+; SMSW, and LOCK-prefix decoding across the end of the exec page.
+;
+; @returns eax = 0 on success, otherwise the failing __LINE__.
+;
+BEGINPROC x861_Test1
+ push xBP
+ mov xBP, xSP
+ pushf
+ push xBX
+ push xCX
+ push xDX
+ push xSI
+ push xDI
+%ifdef RT_ARCH_AMD64
+ push r8
+ push r9
+ push r10
+ push r11
+ push r12
+ push r13
+ push r14
+ push r15
+%endif
+
+ ;
+ ; Odd push behavior
+ ;
+%if 0 ; Seems to be so on AMD only
+%ifdef RT_ARCH_X86
+ ; upper word of a 'push cs' is cleared.
+ mov eax, __LINE__
+ mov dword [esp - 4], 0f0f0f0fh
+ push cs
+ pop ecx
+ mov bx, cs
+ and ebx, 0000ffffh
+ cmp ecx, ebx
+ jne .failed
+
+ ; upper word of a 'push ds' is cleared.
+ mov eax, __LINE__
+ mov dword [esp - 4], 0f0f0f0fh
+ push ds
+ pop ecx
+ mov bx, ds
+ and ebx, 0000ffffh
+ cmp ecx, ebx
+ jne .failed
+
+ ; upper word of a 'push es' is cleared.
+ mov eax, __LINE__
+ mov dword [esp - 4], 0f0f0f0fh
+ push es
+ pop ecx
+ mov bx, es
+ and ebx, 0000ffffh
+ cmp ecx, ebx
+ jne .failed
+%endif ; RT_ARCH_X86
+
+ ; The upper part of a 'push fs' is cleared.
+ mov eax, __LINE__
+ xor ecx, ecx
+ not xCX
+ push xCX
+ pop xCX
+ push fs
+ pop xCX
+ mov bx, fs
+ and ebx, 0000ffffh
+ cmp xCX, xBX
+ jne .failed
+
+ ; The upper part of a 'push gs' is cleared.
+ mov eax, __LINE__
+ xor ecx, ecx
+ not xCX
+ push xCX
+ pop xCX
+ push gs
+ pop xCX
+ mov bx, gs
+ and ebx, 0000ffffh
+ cmp xCX, xBX
+ jne .failed
+%endif
+
+%ifdef RT_ARCH_AMD64
+ ; REX.B works with 'push r64'.
+ call x861_LoadUniqueRegValues
+ mov eax, __LINE__
+ push rcx
+ pop rdx
+ cmp rdx, rcx
+ jne .failed
+
+ call x861_LoadUniqueRegValues
+ mov eax, __LINE__
+ db 041h ; REX.B
+ push rcx
+ pop rdx
+ cmp rdx, r9
+ jne .failed
+
+ call x861_LoadUniqueRegValues
+ mov eax, __LINE__
+ db 042h ; REX.X
+ push rcx
+ pop rdx
+ cmp rdx, rcx
+ jne .failed
+
+ call x861_LoadUniqueRegValues
+ mov eax, __LINE__
+ db 044h ; REX.R
+ push rcx
+ pop rdx
+ cmp rdx, rcx
+ jne .failed
+
+ call x861_LoadUniqueRegValues
+ mov eax, __LINE__
+ db 048h ; REX.W
+ push rcx
+ pop rdx
+ cmp rdx, rcx
+ jne .failed
+
+ call x861_LoadUniqueRegValues
+ mov eax, __LINE__
+ db 04fh ; REX.*
+ push rcx
+ pop rdx
+ cmp rdx, r9
+ jne .failed
+%endif
+
+ ;
+ ; Zero extending when moving from a segreg as well as memory access sizes.
+ ;
+ call x861_LoadUniqueRegValues
+ mov eax, __LINE__
+ mov ecx, ds
+ shr xCX, 16
+ cmp xCX, 0
+ jnz .failed
+
+%ifdef RT_ARCH_AMD64
+ call x861_LoadUniqueRegValues
+ mov eax, __LINE__
+ mov rcx, ds
+ shr rcx, 16
+ cmp rcx, 0
+ jnz .failed
+%endif
+
+ call x861_LoadUniqueRegValues
+ mov eax, __LINE__
+ mov xDX, xCX
+ mov cx, ds
+ shr xCX, 16
+ shr xDX, 16
+ cmp xCX, xDX
+ jnz .failed
+
+ ; Loading is always a word access.
+ mov eax, __LINE__
+ mov xDI, [REF_EXTERN(g_pbEfPage)]
+ lea xDI, [xDI + 0x1000 - 2]
+ mov xDX, es
+ mov [xDI], dx
+ mov es, [xDI] ; should not crash
+
+ ; Saving is always a word access.
+ mov eax, __LINE__
+ mov xDI, [REF_EXTERN(g_pbEfPage)]
+ mov dword [xDI + 0x1000 - 4], -1
+ mov [xDI + 0x1000 - 2], ss ; Should not crash.
+ mov bx, ss
+ mov cx, [xDI + 0x1000 - 2]
+ cmp cx, bx
+ jne .failed
+
+%ifdef RT_ARCH_AMD64
+ ; Check that the rex.R and rex.W bits don't have any influence over a memory write.
+ call x861_ClearRegisters
+ mov eax, __LINE__
+ mov xDI, [REF_EXTERN(g_pbEfPage)]
+ mov dword [xDI + 0x1000 - 4], -1
+ db 04ah
+ mov [xDI + 0x1000 - 2], ss ; Should not crash.
+ mov bx, ss
+ mov cx, [xDI + 0x1000 - 2]
+ cmp cx, bx
+ jne .failed
+%endif
+
+
+ ;
+ ; Check what happens when both string prefixes are used.
+ ;
+ cld
+ mov dx, ds
+ mov es, dx
+
+ ; check that repne scasb (al=0) behaves like expected.
+ lea xDI, [REF(NAME(g_szAlpha))]
+ xor eax, eax ; find the end
+ mov ecx, g_cchAlpha + 1
+ repne scasb
+ cmp ecx, 1
+ mov eax, __LINE__
+ jne .failed
+
+ ; check that repe scasb (al=0) behaves like expected.
+ lea xDI, [REF(NAME(g_szAlpha))]
+ xor eax, eax ; find the end
+ mov ecx, g_cchAlpha + 1
+ repe scasb
+ cmp ecx, g_cchAlpha
+ mov eax, __LINE__
+ jne .failed
+
+ ; repne is last, it wins.
+ lea xDI, [REF(NAME(g_szAlpha))]
+ xor eax, eax ; find the end
+ mov ecx, g_cchAlpha + 1
+ db 0f3h ; repe - ignored
+ db 0f2h ; repne
+ scasb
+ cmp ecx, 1
+ mov eax, __LINE__
+ jne .failed
+
+ ; repe is last, it wins.
+ lea xDI, [REF(NAME(g_szAlpha))]
+ xor eax, eax ; find the end
+ mov ecx, g_cchAlpha + 1
+ db 0f2h ; repne - ignored
+ db 0f3h ; repe
+ scasb
+ cmp ecx, g_cchAlpha
+ mov eax, __LINE__
+ jne .failed
+
+ ;
+ ; Check if stosb works with both prefixes.
+ ;
+ cld
+ mov dx, ds
+ mov es, dx
+ mov xDI, [REF_EXTERN(g_pbEfPage)]
+ xor eax, eax
+ mov ecx, 01000h
+ rep stosb
+
+ mov xDI, [REF_EXTERN(g_pbEfPage)]
+ mov ecx, 4
+ mov eax, 0ffh
+ db 0f2h ; repne
+ stosb
+ mov eax, __LINE__
+ cmp ecx, 0
+ jne .failed
+ mov eax, __LINE__
+ mov xDI, [REF_EXTERN(g_pbEfPage)]
+ cmp dword [xDI], 0ffffffffh
+ jne .failed
+ cmp dword [xDI+4], 0
+ jne .failed
+
+ mov xDI, [REF_EXTERN(g_pbEfPage)]
+ mov ecx, 4
+ mov eax, 0feh
+ db 0f3h ; repe
+ stosb
+ mov eax, __LINE__
+ cmp ecx, 0
+ jne .failed
+ mov eax, __LINE__
+ mov xDI, [REF_EXTERN(g_pbEfPage)]
+ cmp dword [xDI], 0fefefefeh
+ jne .failed
+ cmp dword [xDI+4], 0
+ jne .failed
+
+ ;
+ ; String operations shouldn't crash because of an invalid address if rCX is 0.
+ ;
+ mov eax, __LINE__
+ cld
+ mov dx, ds
+ mov es, dx
+ mov xDI, [REF_EXTERN(g_pbEfPage)]
+ xor xCX, xCX
+ rep stosb ; no trap
+
+ ;
+ ; INS/OUTS will trap in ring-3 even when rCX is 0. (ASSUMES IOPL < 3)
+ ;
+ mov eax, __LINE__
+ cld
+ mov dx, ss
+ mov ss, dx
+ mov xDI, xSP
+ xor xCX, xCX
+ ShouldTrap X86_XCPT_GP, rep insb
+
+ ;
+ ; SMSW can get to the whole of CR0.
+ ;
+ mov eax, __LINE__
+ xor xBX, xBX
+ smsw xBX
+ test ebx, X86_CR0_PG
+ jz .failed
+ test ebx, X86_CR0_PE
+ jz .failed
+
+ ;
+ ; Will the CPU decode the whole r/m+sib stuff before signalling a lock
+ ; prefix error? Use the EF exec page and a LOCK ADD CL,[rDI + disp32]
+ ; instruction at the very end of it.
+ ;
+ mov eax, __LINE__
+ mov xDI, [REF_EXTERN(g_pbEfExecPage)]
+ add xDI, 1000h - 8h
+ mov byte [xDI+0], 0f0h
+ mov byte [xDI+1], 002h
+ mov byte [xDI+2], 08fh
+ mov dword [xDI+3], 000000000h
+ mov byte [xDI+7], 0cch
+ ShouldTrap X86_XCPT_UD, call xDI
+
+ mov eax, __LINE__
+ mov xDI, [REF_EXTERN(g_pbEfExecPage)]
+ add xDI, 1000h - 7h
+ mov byte [xDI+0], 0f0h
+ mov byte [xDI+1], 002h
+ mov byte [xDI+2], 08Fh
+ mov dword [xDI+3], 000000000h
+ ShouldTrap X86_XCPT_UD, call xDI
+
+ ; Truncated encodings below should page-fault while fetching, not #UD.
+ mov eax, __LINE__
+ mov xDI, [REF_EXTERN(g_pbEfExecPage)]
+ add xDI, 1000h - 4h
+ mov byte [xDI+0], 0f0h
+ mov byte [xDI+1], 002h
+ mov byte [xDI+2], 08Fh
+ mov byte [xDI+3], 000h
+ ShouldTrap X86_XCPT_PF, call xDI
+
+ mov eax, __LINE__
+ mov xDI, [REF_EXTERN(g_pbEfExecPage)]
+ add xDI, 1000h - 6h
+ mov byte [xDI+0], 0f0h
+ mov byte [xDI+1], 002h
+ mov byte [xDI+2], 08Fh
+ mov byte [xDI+3], 00h
+ mov byte [xDI+4], 00h
+ mov byte [xDI+5], 00h
+ ShouldTrap X86_XCPT_PF, call xDI
+
+ mov eax, __LINE__
+ mov xDI, [REF_EXTERN(g_pbEfExecPage)]
+ add xDI, 1000h - 5h
+ mov byte [xDI+0], 0f0h
+ mov byte [xDI+1], 002h
+ mov byte [xDI+2], 08Fh
+ mov byte [xDI+3], 00h
+ mov byte [xDI+4], 00h
+ ShouldTrap X86_XCPT_PF, call xDI
+
+ mov eax, __LINE__
+ mov xDI, [REF_EXTERN(g_pbEfExecPage)]
+ add xDI, 1000h - 4h
+ mov byte [xDI+0], 0f0h
+ mov byte [xDI+1], 002h
+ mov byte [xDI+2], 08Fh
+ mov byte [xDI+3], 00h
+ ShouldTrap X86_XCPT_PF, call xDI
+
+ mov eax, __LINE__
+ mov xDI, [REF_EXTERN(g_pbEfExecPage)]
+ add xDI, 1000h - 3h
+ mov byte [xDI+0], 0f0h
+ mov byte [xDI+1], 002h
+ mov byte [xDI+2], 08Fh
+ ShouldTrap X86_XCPT_PF, call xDI
+
+ mov eax, __LINE__
+ mov xDI, [REF_EXTERN(g_pbEfExecPage)]
+ add xDI, 1000h - 2h
+ mov byte [xDI+0], 0f0h
+ mov byte [xDI+1], 002h
+ ShouldTrap X86_XCPT_PF, call xDI
+
+ mov eax, __LINE__
+ mov xDI, [REF_EXTERN(g_pbEfExecPage)]
+ add xDI, 1000h - 1h
+ mov byte [xDI+0], 0f0h
+ ShouldTrap X86_XCPT_PF, call xDI
+
+
+
+.success:
+ xor eax, eax
+.return:
+%ifdef RT_ARCH_AMD64
+ pop r15
+ pop r14
+ pop r13
+ pop r12
+ pop r11
+ pop r10
+ pop r9
+ pop r8
+%endif
+ pop xDI
+ pop xSI
+ pop xDX
+ pop xCX
+ pop xBX
+ popf
+ leave
+ ret
+
+.failed2:
+ mov eax, -1
+.failed:
+ jmp .return
+ENDPROC x861_Test1
+
+
+
+;;
+; Tests the effect of prefix order in group 14.
+;
+; The probe instruction is psrlq (0F 73 /2) with and without the 066h
+; operand-size prefix: without it the MMX form shifts mm0, with it the SSE
+; form shifts xmm0, so inspecting which register got zeroed reveals how the
+; prefix combination was decoded.
+;
+; @returns eax = 0 on success, otherwise the failing __LINE__.
+;
+BEGINPROC x861_Test2
+ SAVE_ALL_PROLOGUE
+
+ ; Check testcase preconditions.
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 00Fh, 073h, 0D0h, 080h ; psrlq mm0, 128
+ call .check_mm0_zero_and_xmm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 00Fh, 073h, 0D0h, 080h ; psrlq xmm0, 128
+ call .check_xmm0_zero_and_mm0_nz
+
+
+ ;
+ ; Real test - Inject other prefixes before the 066h and see what
+ ; happens.
+ ;
+
+ ; General checks that order does not matter, etc.
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 026h, 066h, 00Fh, 073h, 0D0h, 080h
+ call .check_xmm0_zero_and_mm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 026h, 00Fh, 073h, 0D0h, 080h
+ call .check_xmm0_zero_and_mm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 067h, 00Fh, 073h, 0D0h, 080h
+ call .check_xmm0_zero_and_mm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 067h, 066h, 00Fh, 073h, 0D0h, 080h
+ call .check_xmm0_zero_and_mm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 067h, 066h, 065h, 00Fh, 073h, 0D0h, 080h
+ call .check_xmm0_zero_and_mm0_nz
+
+%ifdef RT_ARCH_AMD64
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 048h, 066h, 00Fh, 073h, 0D0h, 080h ; REX.W
+ call .check_xmm0_zero_and_mm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 044h, 066h, 00Fh, 073h, 0D0h, 080h ; REX.R
+ call .check_xmm0_zero_and_mm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 042h, 066h, 00Fh, 073h, 0D0h, 080h ; REX.X
+ call .check_xmm0_zero_and_mm0_nz
+
+ ; Actually for REX, order does matter if the prefix is used.
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 041h, 066h, 00Fh, 073h, 0D0h, 080h ; REX.B
+ call .check_xmm0_zero_and_mm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 041h, 00Fh, 073h, 0D0h, 080h ; REX.B
+ call .check_xmm8_zero_and_xmm0_nz
+%endif
+
+ ; Check all ignored prefixes (repeats some of the above).
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 026h, 00Fh, 073h, 0D0h, 080h ; es
+ call .check_xmm0_zero_and_mm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 065h, 00Fh, 073h, 0D0h, 080h ; gs
+ call .check_xmm0_zero_and_mm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 064h, 00Fh, 073h, 0D0h, 080h ; fs
+ call .check_xmm0_zero_and_mm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 02eh, 00Fh, 073h, 0D0h, 080h ; cs
+ call .check_xmm0_zero_and_mm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 036h, 00Fh, 073h, 0D0h, 080h ; ss
+ call .check_xmm0_zero_and_mm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 03eh, 00Fh, 073h, 0D0h, 080h ; ds
+ call .check_xmm0_zero_and_mm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 067h, 00Fh, 073h, 0D0h, 080h ; addr size
+ call .check_xmm0_zero_and_mm0_nz
+
+%ifdef RT_ARCH_AMD64
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 048h, 00Fh, 073h, 0D0h, 080h ; REX.W
+ call .check_xmm0_zero_and_mm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 044h, 00Fh, 073h, 0D0h, 080h ; REX.R
+ call .check_xmm0_zero_and_mm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 042h, 00Fh, 073h, 0D0h, 080h ; REX.X
+ call .check_xmm0_zero_and_mm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 041h, 00Fh, 073h, 0D0h, 080h ; REX.B - has actual effect on the instruction.
+ call .check_xmm8_zero_and_xmm0_nz
+%endif
+
+ ; Repeated prefix until we hit the max opcode limit.
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 066h, 00Fh, 073h, 0D0h, 080h
+ call .check_xmm0_zero_and_mm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 066h, 066h, 00Fh, 073h, 0D0h, 080h
+ call .check_xmm0_zero_and_mm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 066h, 066h, 066h, 066h, 066h, 066h, 066h, 00Fh, 073h, 0D0h, 080h
+ call .check_xmm0_zero_and_mm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 066h, 066h, 066h, 066h, 066h, 066h, 066h, 066h, 066h, 066h, 00Fh, 073h, 0D0h, 080h
+ call .check_xmm0_zero_and_mm0_nz
+
+ ; 16 bytes total exceeds the 15-byte instruction length limit -> #GP.
+ ShouldTrap X86_XCPT_GP, db 066h, 066h, 066h, 066h, 066h, 066h, 066h, 066h, 066h, 066h, 066h, 066h, 00Fh, 073h, 0D0h, 080h
+
+%ifdef RT_ARCH_AMD64
+ ; Repeated REX is parsed, but only the last byte matters.
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 041h, 048h, 00Fh, 073h, 0D0h, 080h ; REX.B, REX.W
+ call .check_xmm0_zero_and_mm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 048h, 041h, 00Fh, 073h, 0D0h, 080h ; REX.B, REX.W
+ call .check_xmm8_zero_and_xmm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 048h, 044h, 042h, 048h, 044h, 042h, 048h, 044h, 042h, 041h, 00Fh, 073h, 0D0h, 080h
+ call .check_xmm8_zero_and_xmm0_nz
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov eax, __LINE__
+ db 066h, 041h, 041h, 041h, 041h, 041h, 041h, 041h, 041h, 041h, 04eh, 00Fh, 073h, 0D0h, 080h
+ call .check_xmm0_zero_and_mm0_nz
+%endif
+
+ ; Undefined sequences with prefixes that counts.
+ ShouldTrap X86_XCPT_UD, db 0f0h, 066h, 00Fh, 073h, 0D0h, 080h ; LOCK
+ ShouldTrap X86_XCPT_UD, db 0f2h, 066h, 00Fh, 073h, 0D0h, 080h ; REPNZ
+ ShouldTrap X86_XCPT_UD, db 0f3h, 066h, 00Fh, 073h, 0D0h, 080h ; REPZ
+ ShouldTrap X86_XCPT_UD, db 066h, 0f2h, 00Fh, 073h, 0D0h, 080h
+ ShouldTrap X86_XCPT_UD, db 066h, 0f3h, 00Fh, 073h, 0D0h, 080h
+ ShouldTrap X86_XCPT_UD, db 066h, 0f3h, 0f2h, 00Fh, 073h, 0D0h, 080h
+ ShouldTrap X86_XCPT_UD, db 066h, 0f2h, 0f3h, 00Fh, 073h, 0D0h, 080h
+ ShouldTrap X86_XCPT_UD, db 0f2h, 066h, 0f3h, 00Fh, 073h, 0D0h, 080h
+ ShouldTrap X86_XCPT_UD, db 0f3h, 066h, 0f2h, 00Fh, 073h, 0D0h, 080h
+ ShouldTrap X86_XCPT_UD, db 0f3h, 0f2h, 066h, 00Fh, 073h, 0D0h, 080h
+ ShouldTrap X86_XCPT_UD, db 0f2h, 0f3h, 066h, 00Fh, 073h, 0D0h, 080h
+ ShouldTrap X86_XCPT_UD, db 0f0h, 0f2h, 066h, 0f3h, 00Fh, 073h, 0D0h, 080h
+ ShouldTrap X86_XCPT_UD, db 0f0h, 0f3h, 066h, 0f2h, 00Fh, 073h, 0D0h, 080h
+ ShouldTrap X86_XCPT_UD, db 0f0h, 0f3h, 0f2h, 066h, 00Fh, 073h, 0D0h, 080h
+ ShouldTrap X86_XCPT_UD, db 0f0h, 0f2h, 0f3h, 066h, 00Fh, 073h, 0D0h, 080h
+
+.success:
+ xor eax, eax
+.return:
+ SAVE_ALL_EPILOGUE
+ ret
+
+; Local helper: fail (via .failed3) unless xmm0 is all zero and mm0 non-zero.
+; Spills the registers to a scratch area below xSP for dword-wise checks.
+.check_xmm0_zero_and_mm0_nz:
+ sub xSP, 20h
+ movdqu [xSP], xmm0
+ cmp dword [xSP], 0
+ jne .failed3
+ cmp dword [xSP + 4], 0
+ jne .failed3
+ cmp dword [xSP + 8], 0
+ jne .failed3
+ cmp dword [xSP + 12], 0
+ jne .failed3
+ movq [xSP], mm0
+ cmp dword [xSP], 0
+ je .failed3
+ cmp dword [xSP + 4], 0
+ je .failed3
+ add xSP, 20h
+ ret
+
+; Local helper: fail unless mm0 is all zero and xmm0 non-zero.
+.check_mm0_zero_and_xmm0_nz:
+ sub xSP, 20h
+ movq [xSP], mm0
+ cmp dword [xSP], 0
+ jne .failed3
+ cmp dword [xSP + 4], 0
+ jne .failed3
+ movdqu [xSP], xmm0
+ cmp dword [xSP], 0
+ je .failed3
+ cmp dword [xSP + 4], 0
+ je .failed3
+ cmp dword [xSP + 8], 0
+ je .failed3
+ cmp dword [xSP + 12], 0
+ je .failed3
+ add xSP, 20h
+ ret
+
+%ifdef RT_ARCH_AMD64
+; Local helper: fail unless xmm8 is all zero and xmm0 non-zero.
+.check_xmm8_zero_and_xmm0_nz:
+ sub xSP, 20h
+ movdqu [xSP], xmm8
+ cmp dword [xSP], 0
+ jne .failed3
+ cmp dword [xSP + 4], 0
+ jne .failed3
+ cmp dword [xSP + 8], 0
+ jne .failed3
+ cmp dword [xSP + 12], 0
+ jne .failed3
+ movdqu [xSP], xmm0
+ cmp dword [xSP], 0
+ je .failed3
+ cmp dword [xSP + 4], 0
+ je .failed3
+ cmp dword [xSP + 8], 0
+ je .failed3
+ cmp dword [xSP + 12], 0
+ je .failed3
+ add xSP, 20h
+ ret
+%endif
+
+; Drops the scratch area plus the helper's return address, then bails out.
+.failed3:
+ add xSP, 20h + xCB
+ jmp .return
+
+
+ENDPROC x861_Test2
+
+
+;;
+; Tests how much fxsave and fxrstor actually accesses of their 512 memory
+; operand.
+;
+BEGINPROC x861_Test3
+ SAVE_ALL_PROLOGUE
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov xDI, [REF_EXTERN(g_pbEfExecPage)]
+
+ ; Check testcase preconditions.
+ fxsave [xDI]
+ fxrstor [xDI]
+
+ add xDI, PAGE_SIZE - 512
+ mov xSI, xDI
+ fxsave [xDI]
+ fxrstor [xDI]
+
+ ; 464:511 are available to software use. Check that they are left
+ ; untouched by fxsave.
+ mov eax, 0aabbccddh
+ mov ecx, 512 / 4
+ cld
+ rep stosd
+ mov xDI, xSI
+ fxsave [xDI]
+
+ mov ebx, 512
+.chech_software_area_loop:
+ cmp [xDI + xBX - 4], eax
+ jne .chech_software_area_done
+ sub ebx, 4
+ jmp .chech_software_area_loop
+.chech_software_area_done:
+ cmp ebx, 464
+ mov eax, __LINE__
+ ja .return
+
+ ; Check that a save + restore + save cycle yield the same results.
+ mov xBX, [REF_EXTERN(g_pbEfExecPage)]
+ mov xDI, xBX
+ mov eax, 066778899h
+ mov ecx, 512 * 2 / 4
+ cld
+ rep stosd
+ fxsave [xBX]
+
+ call x861_ClearRegistersSSEAndGRegs
+ mov xBX, [REF_EXTERN(g_pbEfExecPage)]
+ fxrstor [xBX]
+
+ fxsave [xBX + 512]
+ mov xSI, xBX
+ lea xDI, [xBX + 512]
+ mov ecx, 512
+ cld
+ repe cmpsb
+ mov eax, __LINE__
+ jnz .return
+
+
+ ; 464:511 are available to software use. Let see how carefully access
+ ; to the full 512 bytes are checked...
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov xDI, [REF_EXTERN(g_pbEfExecPage)]
+ add xDI, PAGE_SIZE - 512
+ ShouldTrap X86_XCPT_PF, fxsave [xDI + 16]
+ ShouldTrap X86_XCPT_PF, fxsave [xDI + 32]
+ ShouldTrap X86_XCPT_PF, fxsave [xDI + 48]
+ ShouldTrap X86_XCPT_PF, fxsave [xDI + 64]
+ ShouldTrap X86_XCPT_PF, fxsave [xDI + 80]
+ ShouldTrap X86_XCPT_PF, fxsave [xDI + 96]
+ ShouldTrap X86_XCPT_PF, fxsave [xDI + 128]
+ ShouldTrap X86_XCPT_PF, fxsave [xDI + 144]
+ ShouldTrap X86_XCPT_PF, fxsave [xDI + 160]
+ ShouldTrap X86_XCPT_PF, fxsave [xDI + 176]
+ ShouldTrap X86_XCPT_PF, fxsave [xDI + 192]
+ ShouldTrap X86_XCPT_PF, fxsave [xDI + 208]
+ ShouldTrap X86_XCPT_PF, fxsave [xDI + 224]
+ ShouldTrap X86_XCPT_PF, fxsave [xDI + 240]
+ ShouldTrap X86_XCPT_PF, fxsave [xDI + 256]
+ ShouldTrap X86_XCPT_PF, fxsave [xDI + 384]
+ ShouldTrap X86_XCPT_PF, fxsave [xDI + 432]
+ ShouldTrap X86_XCPT_PF, fxsave [xDI + 496]
+
+ ShouldTrap X86_XCPT_PF, fxrstor [xDI + 16]
+ ShouldTrap X86_XCPT_PF, fxrstor [xDI + 32]
+ ShouldTrap X86_XCPT_PF, fxrstor [xDI + 48]
+ ShouldTrap X86_XCPT_PF, fxrstor [xDI + 64]
+ ShouldTrap X86_XCPT_PF, fxrstor [xDI + 80]
+ ShouldTrap X86_XCPT_PF, fxrstor [xDI + 96]
+ ShouldTrap X86_XCPT_PF, fxrstor [xDI + 128]
+ ShouldTrap X86_XCPT_PF, fxrstor [xDI + 144]
+ ShouldTrap X86_XCPT_PF, fxrstor [xDI + 160]
+ ShouldTrap X86_XCPT_PF, fxrstor [xDI + 176]
+ ShouldTrap X86_XCPT_PF, fxrstor [xDI + 192]
+ ShouldTrap X86_XCPT_PF, fxrstor [xDI + 208]
+ ShouldTrap X86_XCPT_PF, fxrstor [xDI + 224]
+ ShouldTrap X86_XCPT_PF, fxrstor [xDI + 240]
+ ShouldTrap X86_XCPT_PF, fxrstor [xDI + 256]
+ ShouldTrap X86_XCPT_PF, fxrstor [xDI + 384]
+ ShouldTrap X86_XCPT_PF, fxrstor [xDI + 432]
+ ShouldTrap X86_XCPT_PF, fxrstor [xDI + 496]
+
+ ; Unaligned accesses will cause #GP(0). This takes precedence over #PF.
+ ShouldTrap X86_XCPT_GP, fxsave [xDI + 1]
+ ShouldTrap X86_XCPT_GP, fxsave [xDI + 2]
+ ShouldTrap X86_XCPT_GP, fxsave [xDI + 3]
+ ShouldTrap X86_XCPT_GP, fxsave [xDI + 4]
+ ShouldTrap X86_XCPT_GP, fxsave [xDI + 5]
+ ShouldTrap X86_XCPT_GP, fxsave [xDI + 6]
+ ShouldTrap X86_XCPT_GP, fxsave [xDI + 7]
+ ShouldTrap X86_XCPT_GP, fxsave [xDI + 8]
+ ShouldTrap X86_XCPT_GP, fxsave [xDI + 9]
+ ShouldTrap X86_XCPT_GP, fxsave [xDI + 10]
+ ShouldTrap X86_XCPT_GP, fxsave [xDI + 11]
+ ShouldTrap X86_XCPT_GP, fxsave [xDI + 12]
+ ShouldTrap X86_XCPT_GP, fxsave [xDI + 13]
+ ShouldTrap X86_XCPT_GP, fxsave [xDI + 14]
+ ShouldTrap X86_XCPT_GP, fxsave [xDI + 15]
+
+ ShouldTrap X86_XCPT_GP, fxrstor [xDI + 1]
+ ShouldTrap X86_XCPT_GP, fxrstor [xDI + 2]
+ ShouldTrap X86_XCPT_GP, fxrstor [xDI + 3]
+ ShouldTrap X86_XCPT_GP, fxrstor [xDI + 4]
+ ShouldTrap X86_XCPT_GP, fxrstor [xDI + 5]
+ ShouldTrap X86_XCPT_GP, fxrstor [xDI + 6]
+ ShouldTrap X86_XCPT_GP, fxrstor [xDI + 7]
+ ShouldTrap X86_XCPT_GP, fxrstor [xDI + 8]
+ ShouldTrap X86_XCPT_GP, fxrstor [xDI + 9]
+ ShouldTrap X86_XCPT_GP, fxrstor [xDI + 10]
+ ShouldTrap X86_XCPT_GP, fxrstor [xDI + 11]
+ ShouldTrap X86_XCPT_GP, fxrstor [xDI + 12]
+ ShouldTrap X86_XCPT_GP, fxrstor [xDI + 13]
+ ShouldTrap X86_XCPT_GP, fxrstor [xDI + 14]
+ ShouldTrap X86_XCPT_GP, fxrstor [xDI + 15]
+
+ ; Lets check what a FP in fxsave changes ... nothing on intel.
+ mov ebx, 16
+.fxsave_pf_effect_loop:
+ mov xDI, [REF_EXTERN(g_pbEfExecPage)]
+ add xDI, PAGE_SIZE - 512 * 2
+ mov xSI, xDI
+ mov eax, 066778899h
+ mov ecx, 512 * 2 / 4
+ cld
+ rep stosd
+
+ ShouldTrap X86_XCPT_PF, fxsave [xSI + PAGE_SIZE - 512 + xBX]
+
+ mov ecx, 512 / 4
+ lea xDI, [xSI + 512]
+ cld
+ repz cmpsd
+ lea xAX, [xBX + 20000]
+ jnz .return
+
+ add ebx, 16
+ cmp ebx, 512
+ jbe .fxsave_pf_effect_loop
+
+ ; Lets check that a FP in fxrstor does not have any effect on the FPU or SSE state.
+ mov xDI, [REF_EXTERN(g_pbEfExecPage)]
+ mov ecx, PAGE_SIZE / 4
+ mov eax, 0ffaa33cch
+ cld
+ rep stosd
+
+ call x861_LoadUniqueRegValuesSSEAndGRegs
+ mov xDI, [REF_EXTERN(g_pbEfExecPage)]
+ fxsave [xDI]
+
+ call x861_ClearRegistersSSEAndGRegs
+ mov xDI, [REF_EXTERN(g_pbEfExecPage)]
+ fxsave [xDI + 512]
+
+ mov ebx, 16
+.fxrstor_pf_effect_loop:
+ mov xDI, [REF_EXTERN(g_pbEfExecPage)]
+ mov xSI, xDI
+ lea xDI, [xDI + PAGE_SIZE - 512 + xBX]
+ mov ecx, 512
+ sub ecx, ebx
+ cld
+ rep movsb ; copy unique state to end of page.
+
+ push xBX
+ call x861_ClearRegistersSSEAndGRegs
+ pop xBX
+ mov xDI, [REF_EXTERN(g_pbEfExecPage)]
+ ShouldTrap X86_XCPT_PF, fxrstor [xDI + PAGE_SIZE - 512 + xBX] ; try load unique state
+
+ mov xDI, [REF_EXTERN(g_pbEfExecPage)]
+ lea xSI, [xDI + 512] ; point it to the clean state, which is what we expect.
+ lea xDI, [xDI + 1024]
+ fxsave [xDI] ; save whatever the fpu state currently is.
+ mov ecx, 512 / 4
+ cld
+ repe cmpsd
+ lea xAX, [xBX + 40000]
+ jnz .return ; it shouldn't be modified by faulting fxrstor, i.e. a clean state.
+
+ add ebx, 16
+ cmp ebx, 512
+ jbe .fxrstor_pf_effect_loop
+
+.success:
+ xor eax, eax
+.return:
+ SAVE_ALL_EPILOGUE
+ ret
+ENDPROC x861_Test3
+
+
+;;
+; Tests various multibyte NOP sequences.
+;
+; Verifies that the recommended long-NOP encodings execute without trapping and
+; that LOCK-prefixed NOP variants raise #UD as required.
+;
+; @returns  eax = 0 on success (via .success), otherwise a line-derived code.
+;
+BEGINPROC x861_Test4
+        SAVE_ALL_PROLOGUE
+        call    x861_ClearRegisters
+
+        ; Intel recommended sequences.
+        nop
+        db 066h, 090h
+        db 00fh, 01fh, 000h
+        db 00fh, 01fh, 040h, 000h
+        db 00fh, 01fh, 044h, 000h, 000h
+        db 066h, 00fh, 01fh, 044h, 000h, 000h
+        db 00fh, 01fh, 080h, 000h, 000h, 000h, 000h
+        db 00fh, 01fh, 084h, 000h, 000h, 000h, 000h, 000h
+        db 066h, 00fh, 01fh, 084h, 000h, 000h, 000h, 000h, 000h
+
+        ; Check that the NOPs are allergic to lock prefixing.
+        ShouldTrap X86_XCPT_UD, db 0f0h, 090h              ; lock prefixed NOP.
+        ShouldTrap X86_XCPT_UD, db 0f0h, 066h, 090h        ; lock prefixed two byte NOP.
+        ShouldTrap X86_XCPT_UD, db 0f0h, 00fh, 01fh, 000h  ; lock prefixed three byte NOP.
+
+        ; Check the range of instructions that AMD marks as NOPs.
+        ; %1 is the second opcode byte (0f %1 /r); each expansion tries the
+        ; common ModR/M addressing forms plus a LOCK-prefixed variant (#UD).
+%macro TST_NOP 1
+        db 00fh, %1, 000h
+        db 00fh, %1, 040h, 000h
+        db 00fh, %1, 044h, 000h, 000h
+        db 066h, 00fh, %1, 044h, 000h, 000h
+        db 00fh, %1, 080h, 000h, 000h, 000h, 000h
+        db 00fh, %1, 084h, 000h, 000h, 000h, 000h, 000h
+        db 066h, 00fh, %1, 084h, 000h, 000h, 000h, 000h, 000h
+        ShouldTrap X86_XCPT_UD, db 0f0h, 00fh, %1, 000h
+%endmacro
+        TST_NOP 019h
+        TST_NOP 01ah
+        TST_NOP 01bh
+        TST_NOP 01ch
+        TST_NOP 01dh
+        TST_NOP 01eh
+        TST_NOP 01fh
+
+        ; The AMD P group, intel marks this as a NOP.
+        TST_NOP 00dh
+
+.success:
+        xor     eax, eax
+.return:
+        SAVE_ALL_EPILOGUE
+        ret
+ENDPROC x861_Test4
+
+
+;;
+; Tests various odd/weird/bad encodings.
+;
+; Currently exercises the MOV-from-test-register encoding (0f 24), including
+; the case where the instruction straddles the end of the executable page.
+; A larger set of callf/jmpf/CRx/DRx checks is present but disabled (%if 0).
+;
+; @returns  eax = 0 on success, otherwise a failure code from ShouldTrap*.
+;
+BEGINPROC x861_Test5
+        SAVE_ALL_PROLOGUE
+        call    x861_ClearRegisters
+
+%if 0   ; NOTE: deliberately disabled block - kept for future investigation.
+        ; callf eax...
+        ShouldTrap X86_XCPT_UD, db 0xff, 11011000b
+        ShouldTrap X86_XCPT_UD, db 0xff, 11011001b
+        ShouldTrap X86_XCPT_UD, db 0xff, 11011010b
+        ShouldTrap X86_XCPT_UD, db 0xff, 11011011b
+        ShouldTrap X86_XCPT_UD, db 0xff, 11011100b
+        ShouldTrap X86_XCPT_UD, db 0xff, 11011101b
+        ShouldTrap X86_XCPT_UD, db 0xff, 11011110b
+        ShouldTrap X86_XCPT_UD, db 0xff, 11011111b
+
+        ; jmpf eax...
+        ShouldTrap X86_XCPT_UD, db 0xff, 11101000b
+        ShouldTrap X86_XCPT_UD, db 0xff, 11101001b
+        ShouldTrap X86_XCPT_UD, db 0xff, 11101010b
+        ShouldTrap X86_XCPT_UD, db 0xff, 11101011b
+        ShouldTrap X86_XCPT_UD, db 0xff, 11101100b
+        ShouldTrap X86_XCPT_UD, db 0xff, 11101101b
+        ShouldTrap X86_XCPT_UD, db 0xff, 11101110b
+        ShouldTrap X86_XCPT_UD, db 0xff, 11101111b
+
+        ; #GP(0) vs #UD.
+        ShouldTrap X86_XCPT_GP, mov xAX, cr0
+        ShouldTrap X86_XCPT_UD, lock mov xAX, cr0
+        ShouldTrap X86_XCPT_GP, mov cr0, xAX
+        ShouldTrap X86_XCPT_UD, lock mov cr0, xAX
+        ShouldTrap X86_XCPT_UD, db 0x0f, 0x20,11001000b ; mov xAX, cr1
+        ShouldTrap X86_XCPT_UD, db 0x0f, 0x20,11101000b ; mov xAX, cr5
+        ShouldTrap X86_XCPT_UD, db 0x0f, 0x20,11110000b ; mov xAX, cr6
+        ShouldTrap X86_XCPT_UD, db 0x0f, 0x20,11111000b ; mov xAX, cr7
+        ShouldTrap X86_XCPT_GP, mov xAX, dr7
+        ShouldTrap X86_XCPT_UD, lock mov xAX, dr7
+
+        ; The MOD is ignored by MOV CRx,GReg and MOV GReg,CRx
+        ShouldTrap X86_XCPT_GP, db 0x0f, 0x20,00000000b ; mov xAX, cr0
+        ShouldTrap X86_XCPT_GP, db 0x0f, 0x20,01000000b ; mov xAX, cr0
+        ShouldTrap X86_XCPT_GP, db 0x0f, 0x20,10000000b ; mov xAX, cr0
+        ShouldTrap X86_XCPT_GP, db 0x0f, 0x20,11000000b ; mov xAX, cr0
+        ShouldTrap X86_XCPT_GP, db 0x0f, 0x22,00000000b ; mov cr0, xAX
+        ShouldTrap X86_XCPT_GP, db 0x0f, 0x22,01000000b ; mov cr0, xAX
+        ShouldTrap X86_XCPT_GP, db 0x0f, 0x22,10000000b ; mov cr0, xAX
+        ShouldTrap X86_XCPT_GP, db 0x0f, 0x22,11000000b ; mov cr0, xAX
+%endif
+
+        ; mov eax, tr0, 0x0f 0x24
+        ShouldTrap X86_XCPT_UD, db 0x0f, 0x24, 0xc0 ; mov xAX, tr1
+
+        ; Same instruction placed so that it ends exactly at the page boundary.
+        mov     xAX, [REF_EXTERN(g_pbEfExecPage)]
+        add     xAX, PAGE_SIZE - 3
+        mov     byte [xAX    ], 0x0f
+        mov     byte [xAX + 1], 0x24
+        mov     byte [xAX + 2], 0xc0
+        ShouldTrapExecPage X86_XCPT_UD, PAGE_SIZE - 3
+
+        ; Truncated encoding (only two bytes fit before the guard page).
+        mov     xAX, [REF_EXTERN(g_pbEfExecPage)]
+        add     xAX, PAGE_SIZE - 2
+        mov     byte [xAX    ], 0x0f
+        mov     byte [xAX + 1], 0x24
+        ShouldTrapExecPage X86_XCPT_UD, PAGE_SIZE - 2
+
+.success:
+        xor     eax, eax
+.return:
+        SAVE_ALL_EPILOGUE
+        ret
+ENDPROC x861_Test5
+
+
+;;
+; Tests a reserved FPU encoding, checking that it does not affect the FPU or
+; CPU state in any way.
+;
+; Snapshots FPU+GReg state, executes the encoding, and compares the state
+; afterwards (ignoring FOP/FPUIP).  On mismatch eax is set from __LINE__ and
+; control jumps to the enclosing procedure's .return label.
+;
+; @param    %1+     The instruction (or raw db encoding) to test.
+; @uses     stack
+%macro FpuNopEncoding 1+
+        fnclex
+        call    SetFSW_C0_thru_C3
+
+        push    xBP
+        mov     xBP, xSP
+        sub     xSP, 1024
+        and     xSP, ~0fh               ; 16-byte alignment required by fxsave.
+        call    SaveFPUAndGRegsToStack
+        %1
+        call    CompareFPUAndGRegsOnStackIgnoreOpAndIp
+        leave
+
+        jz      %%ok
+        add     eax, __LINE__
+        jmp     .return
+%%ok:
+%endmacro
+
+;;
+; Used for marking encodings which have a meaning other than FNOP and
+; needs investigating.
+;
+; Executes the reserved encoding (%1) and the instruction it is believed to
+; alias (%2) from the same starting FPU state, then compares the resulting
+; FPU+GReg states (ignoring FOP/FPUIP).  On mismatch eax is set from __LINE__
+; and control jumps to the enclosing procedure's .return label.
+;
+; @param    %1      The reserved encoding (typically a db sequence).
+; @param    %2      The supposedly equivalent regular instruction.
+; @uses     stack
+%macro FpuReservedEncoding 2
+        fnclex
+        call    SetFSW_C0_thru_C3
+
+        push    xBP
+        mov     xBP, xSP
+        sub     xSP, 2048
+        and     xSP, ~0fh               ; 16-byte alignment required by fxsave.
+        ; Zero IP/DP fields so the saved image compares equal later.
+        mov     dword [xSP + 1024 + X86FXSTATE.FPUIP], 0
+        mov     dword [xSP + 1024 + X86FXSTATE.FPUCS], 0
+        mov     dword [xSP + 1024 + X86FXSTATE.FPUDP], 0
+        mov     dword [xSP + 1024 + X86FXSTATE.FPUDS], 0
+        arch_fxsave  [xSP + 1024]
+        %1
+        call    SaveFPUAndGRegsToStack
+
+        arch_fxrstor [xSP + 1024]       ; restore the pre-%1 state for %2.
+        %2
+        call    CompareFPUAndGRegsOnStackIgnoreOpAndIp
+        ;arch_fxrstor [xSP + 1024]
+        leave
+
+        jz      %%ok
+        add     eax, __LINE__
+        jmp     .return
+%%ok:
+%endmacro
+
+
+;;
+; Saves the FPU and general registers to the stack area right next to the
+; return address.
+;
+; The required area size is 512 + 80h = 640.
+;
+; Layout: [xSP+xCB .. +512] fxsave image, [xSP+512+xCB .. +80h] GRegs+flags.
+; On 32-bit hosts each GReg is stored twice (or padded with eax) so the GReg
+; area layout matches the 64-bit one and can be compared with the same code.
+;
+; @uses     Nothing, except stack.
+;
+SaveFPUAndGRegsToStack:
+        ; Must clear the FXSAVE area.
+        pushf
+        push    xCX
+        push    xAX
+        push    xDI
+
+        lea     xDI, [xSP + xCB * 5]    ; skip the 4 saves + return address.
+        mov     xCX, 512 / 4
+        mov     eax, 0cccccccch         ; recognizable filler pattern.
+        cld
+        rep stosd
+
+        pop     xDI
+        pop     xAX
+        pop     xCX
+        popf
+
+        ; Save the FPU state.
+        ; IP/DP fields are zeroed first so images from different call sites
+        ; compare equal.
+        mov     dword [xSP + xCB + X86FXSTATE.FPUIP], 0
+        mov     dword [xSP + xCB + X86FXSTATE.FPUCS], 0
+        mov     dword [xSP + xCB + X86FXSTATE.FPUDP], 0
+        mov     dword [xSP + xCB + X86FXSTATE.FPUDS], 0
+        arch_fxsave [xSP + xCB]
+
+        ; Save GRegs (80h bytes).
+%ifdef RT_ARCH_AMD64
+        mov     [xSP + 512 + xCB + 000h], xAX
+        mov     [xSP + 512 + xCB + 008h], xBX
+        mov     [xSP + 512 + xCB + 010h], xCX
+        mov     [xSP + 512 + xCB + 018h], xDX
+        mov     [xSP + 512 + xCB + 020h], xDI
+        mov     [xSP + 512 + xCB + 028h], xSI
+        mov     [xSP + 512 + xCB + 030h], xBP
+        mov     [xSP + 512 + xCB + 038h], r8
+        mov     [xSP + 512 + xCB + 040h], r9
+        mov     [xSP + 512 + xCB + 048h], r10
+        mov     [xSP + 512 + xCB + 050h], r11
+        mov     [xSP + 512 + xCB + 058h], r12
+        mov     [xSP + 512 + xCB + 060h], r13
+        mov     [xSP + 512 + xCB + 068h], r14
+        mov     [xSP + 512 + xCB + 070h], r15
+        pushf
+        pop     rax
+        mov     [xSP + 512 + xCB + 078h], rax   ; rflags snapshot.
+        mov     rax, [xSP + 512 + xCB + 000h]   ; restore clobbered rax.
+%else
+        ; 32-bit: each register stored twice / padded with eax to mirror the
+        ; 64-bit layout above.
+        mov     [xSP + 512 + xCB + 000h], eax
+        mov     [xSP + 512 + xCB + 004h], eax
+        mov     [xSP + 512 + xCB + 008h], ebx
+        mov     [xSP + 512 + xCB + 00ch], ebx
+        mov     [xSP + 512 + xCB + 010h], ecx
+        mov     [xSP + 512 + xCB + 014h], ecx
+        mov     [xSP + 512 + xCB + 018h], edx
+        mov     [xSP + 512 + xCB + 01ch], edx
+        mov     [xSP + 512 + xCB + 020h], edi
+        mov     [xSP + 512 + xCB + 024h], edi
+        mov     [xSP + 512 + xCB + 028h], esi
+        mov     [xSP + 512 + xCB + 02ch], esi
+        mov     [xSP + 512 + xCB + 030h], ebp
+        mov     [xSP + 512 + xCB + 034h], ebp
+        mov     [xSP + 512 + xCB + 038h], eax   ; r8..r15 slots: eax filler.
+        mov     [xSP + 512 + xCB + 03ch], eax
+        mov     [xSP + 512 + xCB + 040h], eax
+        mov     [xSP + 512 + xCB + 044h], eax
+        mov     [xSP + 512 + xCB + 048h], eax
+        mov     [xSP + 512 + xCB + 04ch], eax
+        mov     [xSP + 512 + xCB + 050h], eax
+        mov     [xSP + 512 + xCB + 054h], eax
+        mov     [xSP + 512 + xCB + 058h], eax
+        mov     [xSP + 512 + xCB + 05ch], eax
+        mov     [xSP + 512 + xCB + 060h], eax
+        mov     [xSP + 512 + xCB + 064h], eax
+        mov     [xSP + 512 + xCB + 068h], eax
+        mov     [xSP + 512 + xCB + 06ch], eax
+        mov     [xSP + 512 + xCB + 070h], eax
+        mov     [xSP + 512 + xCB + 074h], eax
+        pushf
+        pop     eax
+        mov     [xSP + 512 + xCB + 078h], eax   ; eflags snapshot.
+        mov     [xSP + 512 + xCB + 07ch], eax
+        mov     eax, [xSP + 512 + xCB + 000h]   ; restore clobbered eax.
+%endif
+        ret
+
+;;
+; Compares the current FPU and general registers to that found in the stack
+; area prior to the return address.
+;
+; Takes a fresh SaveFPUAndGRegsToStack snapshot into a temporary 1024 byte
+; stack area and memcmp's it (640 bytes) against the caller-provided one.
+;
+; @uses     Stack, flags and eax/rax.
+; @returns  eax is zero on success, eax is 1000000 * offset on failure.
+;           ZF reflects the eax value to save a couple of instructions...
+;
+CompareFPUAndGRegsOnStack:
+        lea     xSP, [xSP - (1024 - xCB)]       ; xCB: return addr is part of area.
+        call    SaveFPUAndGRegsToStack
+
+        push    xSI
+        push    xDI
+        push    xCX
+
+        mov     xCX, 640                        ; 512 fxsave + 80h GRegs.
+        lea     xSI, [xSP + xCB*3]              ; fresh snapshot.
+        lea     xDI, [xSI + 1024]               ; caller's snapshot.
+
+        cld
+        repe cmpsb
+        je      .ok
+
+        ;int3
+        ; Compute the byte offset of the first mismatch into eax.
+        lea     xAX, [xSP + xCB*3]
+        xchg    xAX, xSI
+        sub     xAX, xSI
+
+        push    xDX
+        mov     xDX, 1000000                    ; scale so the offset is readable.
+        mul     xDX
+        pop     xDX
+        jmp     .return
+.ok:
+        xor     eax, eax
+.return:
+        pop     xCX
+        pop     xDI
+        pop     xSI
+        lea     xSP, [xSP + (1024 - xCB)]
+        or      eax, eax                        ; make ZF reflect eax.
+        ret
+
+;;
+; Same as CompareFPUAndGRegsOnStack, except that it ignores the FOP and FPUIP
+; registers.
+;
+; It does so by zeroing FOP and FPUIP in both images before the compare.
+;
+; @uses     Stack, flags and eax/rax.
+; @returns  eax is zero on success, eax is 1000000 * offset on failure.
+;           ZF reflects the eax value to save a couple of instructions...
+;
+CompareFPUAndGRegsOnStackIgnoreOpAndIp:
+        lea     xSP, [xSP - (1024 - xCB)]       ; xCB: return addr is part of area.
+        call    SaveFPUAndGRegsToStack
+
+        push    xSI
+        push    xDI
+        push    xCX
+
+        mov     xCX, 640                        ; 512 fxsave + 80h GRegs.
+        lea     xSI, [xSP + xCB*3]              ; fresh snapshot.
+        lea     xDI, [xSI + 1024]               ; caller's snapshot.
+
+        mov     word  [xSI + X86FXSTATE.FOP], 0         ; ignore
+        mov     word  [xDI + X86FXSTATE.FOP], 0         ; ignore
+        mov     dword [xSI + X86FXSTATE.FPUIP], 0       ; ignore
+        mov     dword [xDI + X86FXSTATE.FPUIP], 0       ; ignore
+
+        cld
+        repe cmpsb
+        je      .ok
+
+        ;int3
+        ; Compute the byte offset of the first mismatch into eax.
+        lea     xAX, [xSP + xCB*3]
+        xchg    xAX, xSI
+        sub     xAX, xSI
+
+        push    xDX
+        mov     xDX, 1000000                    ; scale so the offset is readable.
+        mul     xDX
+        pop     xDX
+        jmp     .return
+.ok:
+        xor     eax, eax
+.return:
+        pop     xCX
+        pop     xDI
+        pop     xSI
+        lea     xSP, [xSP + (1024 - xCB)]
+        or      eax, eax                        ; make ZF reflect eax.
+        ret
+
+
+;;
+; Sets the C0 thru C3 condition code bits in the FPU status word by editing
+; a saved environment image and reloading it.
+;
+; @uses     20h bytes of stack (restored before return).
+;
+SetFSW_C0_thru_C3:
+        sub     xSP, 20h
+        fstenv  [xSP]
+        or      word [xSP + 4], X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3  ; +4 = FSW in the fstenv image.
+        fldenv  [xSP]
+        add     xSP, 20h
+        ret
+
+
+;;
+; Tests some odd floating point instruction encodings.
+;
+; Walks the reserved/undefined slots of the 0xd9 thru 0xdf escape blocks:
+; encodings expected to raise #UD use ShouldTrap, encodings that alias an
+; existing instruction use FpuReservedEncoding, and historic 287 no-ops use
+; FpuNopEncoding.
+;
+; @returns  eax = 0 on success, otherwise a line-derived failure code.
+;
+BEGINPROC x861_Test6
+        SAVE_ALL_PROLOGUE
+
+        ; standard stuff...
+        fld dword [REF(g_r32V1)]
+        fld qword [REF(g_r64V1)]
+        fld tword [REF(g_r80V1)]
+        fld qword [REF(g_r64V1)]
+        fld dword [REF(g_r32V2)]
+        fld dword [REF(g_r32V1)]
+
+        ; Test the nop check.
+        FpuNopEncoding fnop
+
+
+        ; the 0xd9 block
+        ShouldTrap X86_XCPT_UD, db 0d9h, 008h
+        ShouldTrap X86_XCPT_UD, db 0d9h, 009h
+        ShouldTrap X86_XCPT_UD, db 0d9h, 00ah
+        ShouldTrap X86_XCPT_UD, db 0d9h, 00bh
+        ShouldTrap X86_XCPT_UD, db 0d9h, 00ch
+        ShouldTrap X86_XCPT_UD, db 0d9h, 00dh
+        ShouldTrap X86_XCPT_UD, db 0d9h, 00eh
+        ShouldTrap X86_XCPT_UD, db 0d9h, 00fh
+
+        ShouldTrap X86_XCPT_UD, db 0d9h, 0d1h
+        ShouldTrap X86_XCPT_UD, db 0d9h, 0d2h
+        ShouldTrap X86_XCPT_UD, db 0d9h, 0d3h
+        ShouldTrap X86_XCPT_UD, db 0d9h, 0d4h
+        ShouldTrap X86_XCPT_UD, db 0d9h, 0d5h
+        ShouldTrap X86_XCPT_UD, db 0d9h, 0d6h
+        ShouldTrap X86_XCPT_UD, db 0d9h, 0d7h
+        FpuReservedEncoding {db 0d9h, 0d8h}, { fstp st0 }
+        FpuReservedEncoding {db 0d9h, 0d9h}, { fstp st1 }
+        FpuReservedEncoding {db 0d9h, 0dah}, { fstp st2 }
+        FpuReservedEncoding {db 0d9h, 0dbh}, { fstp st3 }
+        FpuReservedEncoding {db 0d9h, 0dch}, { fstp st4 }
+        FpuReservedEncoding {db 0d9h, 0ddh}, { fstp st5 }
+        FpuReservedEncoding {db 0d9h, 0deh}, { fstp st6 }
+        ;FpuReservedEncoding {db 0d9h, 0dfh}, { fstp st7 } ; This variant seems to ignore empty ST(0) values!
+        ShouldTrap X86_XCPT_UD, db 0d9h, 0e2h
+        ShouldTrap X86_XCPT_UD, db 0d9h, 0e3h
+        ShouldTrap X86_XCPT_UD, db 0d9h, 0e6h
+        ShouldTrap X86_XCPT_UD, db 0d9h, 0e7h
+        ShouldTrap X86_XCPT_UD, db 0d9h, 0efh
+        ShouldTrap X86_XCPT_UD, db 0d9h, 008h
+        ShouldTrap X86_XCPT_UD, db 0d9h, 00fh
+
+        ; the 0xda block
+        ShouldTrap X86_XCPT_UD, db 0dah, 0e0h
+        ShouldTrap X86_XCPT_UD, db 0dah, 0e1h
+        ShouldTrap X86_XCPT_UD, db 0dah, 0e2h
+        ShouldTrap X86_XCPT_UD, db 0dah, 0e3h
+        ShouldTrap X86_XCPT_UD, db 0dah, 0e4h
+        ShouldTrap X86_XCPT_UD, db 0dah, 0e5h
+        ShouldTrap X86_XCPT_UD, db 0dah, 0e6h
+        ShouldTrap X86_XCPT_UD, db 0dah, 0e7h
+        ShouldTrap X86_XCPT_UD, db 0dah, 0e8h
+        ShouldTrap X86_XCPT_UD, db 0dah, 0eah
+        ShouldTrap X86_XCPT_UD, db 0dah, 0ebh
+        ShouldTrap X86_XCPT_UD, db 0dah, 0ech
+        ShouldTrap X86_XCPT_UD, db 0dah, 0edh
+        ShouldTrap X86_XCPT_UD, db 0dah, 0eeh
+        ShouldTrap X86_XCPT_UD, db 0dah, 0efh
+        ShouldTrap X86_XCPT_UD, db 0dah, 0f0h
+        ShouldTrap X86_XCPT_UD, db 0dah, 0f1h
+        ShouldTrap X86_XCPT_UD, db 0dah, 0f2h
+        ShouldTrap X86_XCPT_UD, db 0dah, 0f3h
+        ShouldTrap X86_XCPT_UD, db 0dah, 0f4h
+        ShouldTrap X86_XCPT_UD, db 0dah, 0f5h
+        ShouldTrap X86_XCPT_UD, db 0dah, 0f6h
+        ShouldTrap X86_XCPT_UD, db 0dah, 0f7h
+        ShouldTrap X86_XCPT_UD, db 0dah, 0f8h
+        ShouldTrap X86_XCPT_UD, db 0dah, 0f9h
+        ShouldTrap X86_XCPT_UD, db 0dah, 0fah
+        ShouldTrap X86_XCPT_UD, db 0dah, 0fbh
+        ShouldTrap X86_XCPT_UD, db 0dah, 0fch
+        ShouldTrap X86_XCPT_UD, db 0dah, 0fdh
+        ShouldTrap X86_XCPT_UD, db 0dah, 0feh
+        ShouldTrap X86_XCPT_UD, db 0dah, 0ffh
+
+        ; the 0xdb block
+        FpuNopEncoding db 0dbh, 0e0h ; fneni
+        FpuNopEncoding db 0dbh, 0e1h ; fndisi
+        FpuNopEncoding db 0dbh, 0e4h ; fnsetpm
+        ShouldTrap X86_XCPT_UD, db 0dbh, 0e5h
+        ShouldTrap X86_XCPT_UD, db 0dbh, 0e6h
+        ShouldTrap X86_XCPT_UD, db 0dbh, 0e7h
+        ShouldTrap X86_XCPT_UD, db 0dbh, 0f8h
+        ShouldTrap X86_XCPT_UD, db 0dbh, 0f9h
+        ShouldTrap X86_XCPT_UD, db 0dbh, 0fah
+        ShouldTrap X86_XCPT_UD, db 0dbh, 0fbh
+        ShouldTrap X86_XCPT_UD, db 0dbh, 0fch
+        ShouldTrap X86_XCPT_UD, db 0dbh, 0fdh
+        ShouldTrap X86_XCPT_UD, db 0dbh, 0feh
+        ShouldTrap X86_XCPT_UD, db 0dbh, 0ffh
+        ShouldTrap X86_XCPT_UD, db 0dbh, 020h
+        ShouldTrap X86_XCPT_UD, db 0dbh, 023h
+        ShouldTrap X86_XCPT_UD, db 0dbh, 030h
+        ShouldTrap X86_XCPT_UD, db 0dbh, 032h
+
+        ; the 0xdc block
+        FpuReservedEncoding {db 0dch, 0d0h}, { fcom st0 }
+        FpuReservedEncoding {db 0dch, 0d1h}, { fcom st1 }
+        FpuReservedEncoding {db 0dch, 0d2h}, { fcom st2 }
+        FpuReservedEncoding {db 0dch, 0d3h}, { fcom st3 }
+        FpuReservedEncoding {db 0dch, 0d4h}, { fcom st4 }
+        FpuReservedEncoding {db 0dch, 0d5h}, { fcom st5 }
+        FpuReservedEncoding {db 0dch, 0d6h}, { fcom st6 }
+        FpuReservedEncoding {db 0dch, 0d7h}, { fcom st7 }
+        FpuReservedEncoding {db 0dch, 0d8h}, { fcomp st0 }
+        FpuReservedEncoding {db 0dch, 0d9h}, { fcomp st1 }
+        FpuReservedEncoding {db 0dch, 0dah}, { fcomp st2 }
+        FpuReservedEncoding {db 0dch, 0dbh}, { fcomp st3 }
+        FpuReservedEncoding {db 0dch, 0dch}, { fcomp st4 }
+        FpuReservedEncoding {db 0dch, 0ddh}, { fcomp st5 }
+        FpuReservedEncoding {db 0dch, 0deh}, { fcomp st6 }
+        FpuReservedEncoding {db 0dch, 0dfh}, { fcomp st7 }
+
+        ; the 0xdd block
+        FpuReservedEncoding {db 0ddh, 0c8h}, { fxch st0 }
+        FpuReservedEncoding {db 0ddh, 0c9h}, { fxch st1 }
+        FpuReservedEncoding {db 0ddh, 0cah}, { fxch st2 }
+        FpuReservedEncoding {db 0ddh, 0cbh}, { fxch st3 }
+        FpuReservedEncoding {db 0ddh, 0cch}, { fxch st4 }
+        FpuReservedEncoding {db 0ddh, 0cdh}, { fxch st5 }
+        FpuReservedEncoding {db 0ddh, 0ceh}, { fxch st6 }
+        FpuReservedEncoding {db 0ddh, 0cfh}, { fxch st7 }
+        ShouldTrap X86_XCPT_UD, db 0ddh, 0f0h
+        ShouldTrap X86_XCPT_UD, db 0ddh, 0f1h
+        ShouldTrap X86_XCPT_UD, db 0ddh, 0f2h
+        ShouldTrap X86_XCPT_UD, db 0ddh, 0f3h
+        ShouldTrap X86_XCPT_UD, db 0ddh, 0f4h
+        ShouldTrap X86_XCPT_UD, db 0ddh, 0f5h
+        ShouldTrap X86_XCPT_UD, db 0ddh, 0f6h
+        ShouldTrap X86_XCPT_UD, db 0ddh, 0f7h
+        ShouldTrap X86_XCPT_UD, db 0ddh, 0f8h
+        ShouldTrap X86_XCPT_UD, db 0ddh, 0f9h
+        ShouldTrap X86_XCPT_UD, db 0ddh, 0fah
+        ShouldTrap X86_XCPT_UD, db 0ddh, 0fbh
+        ShouldTrap X86_XCPT_UD, db 0ddh, 0fch
+        ShouldTrap X86_XCPT_UD, db 0ddh, 0fdh
+        ShouldTrap X86_XCPT_UD, db 0ddh, 0feh
+        ShouldTrap X86_XCPT_UD, db 0ddh, 0ffh
+        ShouldTrap X86_XCPT_UD, db 0ddh, 028h
+        ShouldTrap X86_XCPT_UD, db 0ddh, 02fh
+
+        ; the 0xde block
+        FpuReservedEncoding {db 0deh, 0d0h}, { fcomp st0 }
+        FpuReservedEncoding {db 0deh, 0d1h}, { fcomp st1 }
+        FpuReservedEncoding {db 0deh, 0d2h}, { fcomp st2 }
+        FpuReservedEncoding {db 0deh, 0d3h}, { fcomp st3 }
+        FpuReservedEncoding {db 0deh, 0d4h}, { fcomp st4 }
+        FpuReservedEncoding {db 0deh, 0d5h}, { fcomp st5 }
+        FpuReservedEncoding {db 0deh, 0d6h}, { fcomp st6 }
+        FpuReservedEncoding {db 0deh, 0d7h}, { fcomp st7 }
+        ShouldTrap X86_XCPT_UD, db 0deh, 0d8h
+        ShouldTrap X86_XCPT_UD, db 0deh, 0dah
+        ShouldTrap X86_XCPT_UD, db 0deh, 0dbh
+        ShouldTrap X86_XCPT_UD, db 0deh, 0dch
+        ShouldTrap X86_XCPT_UD, db 0deh, 0ddh
+        ShouldTrap X86_XCPT_UD, db 0deh, 0deh
+        ShouldTrap X86_XCPT_UD, db 0deh, 0dfh
+
+        ; the 0xdf block
+        FpuReservedEncoding {db 0dfh, 0c8h}, { fxch st0 }
+        FpuReservedEncoding {db 0dfh, 0c9h}, { fxch st1 }
+        FpuReservedEncoding {db 0dfh, 0cah}, { fxch st2 }
+        FpuReservedEncoding {db 0dfh, 0cbh}, { fxch st3 }
+        FpuReservedEncoding {db 0dfh, 0cch}, { fxch st4 }
+        FpuReservedEncoding {db 0dfh, 0cdh}, { fxch st5 }
+        FpuReservedEncoding {db 0dfh, 0ceh}, { fxch st6 }
+        FpuReservedEncoding {db 0dfh, 0cfh}, { fxch st7 }
+        FpuReservedEncoding {db 0dfh, 0d0h}, { fstp st0 }
+        FpuReservedEncoding {db 0dfh, 0d1h}, { fstp st1 }
+        FpuReservedEncoding {db 0dfh, 0d2h}, { fstp st2 }
+        FpuReservedEncoding {db 0dfh, 0d3h}, { fstp st3 }
+        FpuReservedEncoding {db 0dfh, 0d4h}, { fstp st4 }
+        FpuReservedEncoding {db 0dfh, 0d5h}, { fstp st5 }
+        FpuReservedEncoding {db 0dfh, 0d6h}, { fstp st6 }
+        FpuReservedEncoding {db 0dfh, 0d7h}, { fstp st7 }
+        FpuReservedEncoding {db 0dfh, 0d8h}, { fstp st0 }
+        FpuReservedEncoding {db 0dfh, 0d9h}, { fstp st1 }
+        FpuReservedEncoding {db 0dfh, 0dah}, { fstp st2 }
+        FpuReservedEncoding {db 0dfh, 0dbh}, { fstp st3 }
+        FpuReservedEncoding {db 0dfh, 0dch}, { fstp st4 }
+        FpuReservedEncoding {db 0dfh, 0ddh}, { fstp st5 }
+        FpuReservedEncoding {db 0dfh, 0deh}, { fstp st6 }
+        FpuReservedEncoding {db 0dfh, 0dfh}, { fstp st7 }
+        ShouldTrap X86_XCPT_UD, db 0dfh, 0e1h
+        ShouldTrap X86_XCPT_UD, db 0dfh, 0e2h
+        ShouldTrap X86_XCPT_UD, db 0dfh, 0e3h
+        ShouldTrap X86_XCPT_UD, db 0dfh, 0e4h
+        ShouldTrap X86_XCPT_UD, db 0dfh, 0e5h
+        ShouldTrap X86_XCPT_UD, db 0dfh, 0e6h
+        ShouldTrap X86_XCPT_UD, db 0dfh, 0e7h
+        ShouldTrap X86_XCPT_UD, db 0dfh, 0f8h
+        ShouldTrap X86_XCPT_UD, db 0dfh, 0f9h
+        ShouldTrap X86_XCPT_UD, db 0dfh, 0fah
+        ShouldTrap X86_XCPT_UD, db 0dfh, 0fbh
+        ShouldTrap X86_XCPT_UD, db 0dfh, 0fch
+        ShouldTrap X86_XCPT_UD, db 0dfh, 0fdh
+        ShouldTrap X86_XCPT_UD, db 0dfh, 0feh
+        ShouldTrap X86_XCPT_UD, db 0dfh, 0ffh
+
+
+.success:
+        xor     eax, eax
+.return:
+        SAVE_ALL_EPILOGUE
+        ret
+
+ENDPROC x861_Test6
+
+
+;;
+; Tests some floating point exceptions and such.
+;
+; Covers denormal loads (#DE masked/unmasked), x87 stack overflow (#IS),
+; pending-#MF vs #PF precedence, and unmasking a pending exception + fwait.
+;
+; @returns  eax = 0 on success, otherwise a failure code from the macros.
+;
+BEGINPROC x861_Test7
+        SAVE_ALL_PROLOGUE
+        sub     xSP, 2048
+
+        ; Load some pointers.
+        lea     xSI, [REF(g_r32V1)]
+        mov     xDI, [REF_EXTERN(g_pbEfExecPage)]
+        add     xDI, PAGE_SIZE          ; invalid page.
+
+        ;
+        ; Check denormal numbers.
+        ; Turns out the number is loaded onto the stack even if an exception is triggered.
+        ;
+        fninit
+        mov     dword [xSP], X86_FCW_PC_64 | X86_FCW_RC_NEAREST  ; #DE unmasked.
+        fldcw   [xSP]
+        FpuShouldTrap X86_FSW_DE, 0, fld dword [REF(g_r32D0)]
+        CheckSt0Value 0x00000000, 0x80000000, 0x3f7f
+
+        mov     dword [xSP], X86_FCW_PC_64 | X86_FCW_RC_NEAREST | X86_FCW_DM  ; #DE masked.
+        fldcw   [xSP]
+        fld     dword [REF(g_r32D0)]
+        fwait
+        FpuCheckFSW X86_FSW_DE, 0
+        CheckSt0Value 0x00000000, 0x80000000, 0x3f7f
+
+        ;
+        ; stack overflow
+        ;
+        fninit
+        mov     dword [xSP], X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+        fldcw   [xSP]
+        ; Fill all eight stack slots.
+        fld     qword [REF(g_r64V1)]
+        fld     dword [xSI]
+        fld     dword [xSI]
+        fld     dword [xSI]
+        fld     dword [xSI]
+        fld     dword [xSI]
+        fld     dword [xSI]
+        fld     tword [REF(g_r80V1)]
+        fwait
+
+        ; The ninth load overflows: IE+SF with C1 clear (overflow direction).
+        FpuShouldTrap X86_FSW_IE | X86_FSW_SF | X86_FSW_C1, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3, \
+                fld dword [xSI]
+        CheckSt0Value_Eight
+
+        FpuShouldTrap X86_FSW_IE | X86_FSW_SF | X86_FSW_C1, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3, \
+                fld dword [xSI]
+        CheckSt0Value_Eight
+
+        ; stack overflow vs #PF.
+        ShouldTrap X86_XCPT_PF, fld dword [xDI]
+        fwait
+
+        ; stack overflow vs denormal number
+        FpuShouldTrap X86_FSW_IE | X86_FSW_SF | X86_FSW_C1, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3, \
+                fld dword [xSI]
+        CheckSt0Value_Eight
+
+        ;
+        ; Mask the overflow exception. We should get QNaN now regardless of
+        ; what we try to push (provided the memory is valid).
+        ;
+        mov     dword [xSP], X86_FCW_PC_64 | X86_FCW_RC_NEAREST | X86_FCW_IM
+        fldcw   [xSP]
+
+        fld     dword [xSI]
+        FpuCheckFSW X86_FSW_IE | X86_FSW_SF | X86_FSW_C1, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+        fnclex
+        CheckSt0Value 0x00000000, 0xc0000000, 0xffff        ; indefinite QNaN.
+
+        fld     qword [REF(g_r64V1)]
+        FpuCheckFSW X86_FSW_IE | X86_FSW_SF | X86_FSW_C1, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+        fnclex
+        CheckSt0Value 0x00000000, 0xc0000000, 0xffff
+
+        ; This includes denormal values.
+        fld     dword [REF(g_r32D0)]
+        fwait
+        FpuCheckFSW X86_FSW_IE | X86_FSW_SF | X86_FSW_C1, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+        CheckSt0Value 0x00000000, 0xc0000000, 0xffff
+        fnclex
+
+        ;
+        ; #PF vs previous stack overflow. I.e. whether pending FPU exception
+        ; is checked before fetching memory operands.
+        ;
+        mov     dword [xSP], X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+        fldcw   [xSP]
+        fld     qword [REF(g_r64V1)]
+        ShouldTrap X86_XCPT_MF, fld dword [xDI]     ; #MF wins over #PF.
+        fnclex
+
+        ;
+        ; What happens when we unmask an exception and fwait?
+        ;
+        mov     dword [xSP], X86_FCW_PC_64 | X86_FCW_RC_NEAREST | X86_FCW_IM
+        fldcw   [xSP]
+        fld     dword [xSI]
+        fwait
+        FpuCheckFSW X86_FSW_IE | X86_FSW_SF | X86_FSW_C1, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+        mov     dword [xSP], X86_FCW_PC_64 | X86_FCW_RC_NEAREST     ; unmask IE again.
+        fldcw   [xSP]
+        FpuCheckFSW X86_FSW_ES | X86_FSW_B | X86_FSW_IE | X86_FSW_SF | X86_FSW_C1, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+
+        ; The pending exception stays pending; each fwait re-raises #MF.
+        ShouldTrap X86_XCPT_MF, fwait
+        ShouldTrap X86_XCPT_MF, fwait
+        ShouldTrap X86_XCPT_MF, fwait
+        fnclex
+
+
+.success:
+        xor     eax, eax
+.return:
+        add     xSP, 2048
+        SAVE_ALL_EPILOGUE
+        ret
+ENDPROC x861_Test7
+
+
+extern NAME(RTTestISub)
+
+;;
+; Sets the current subtest.
+;
+; Calls RTTestISub with %1 (a string literal) as the subtest name, following
+; the host calling convention.  The name is emitted inline in the code
+; segment and jumped over.
+;
+; @param    %1      The subtest name (string literal).
+;
+%macro SetSubTest 1
+%ifdef RT_ARCH_AMD64
+ %ifdef ASM_CALL64_GCC
+        lea     rdi, [%%s_szName wrt rip]
+ %else
+        lea     rcx, [%%s_szName wrt rip]
+ %endif
+        call    NAME(RTTestISub)
+%else
+ %ifdef RT_OS_DARWIN
+        sub     esp, 12                 ; keep the stack 16-byte aligned.
+        push    %%s_szName
+        call    NAME(RTTestISub)
+        add     esp, 16
+ %else
+        push    %%s_szName
+        call    NAME(RTTestISub)
+        add     esp, 4
+ %endif
+%endif
+        jmp     %%done
+%%s_szName:
+        db %1, 0
+%%done:
+%endmacro
+
+
+;;
+; Checks the opcode and CS:IP FPU.
+;
+; Compares the FPUIP/FPUCS/FOP fields recorded by fxsave (and the fnstenv
+; image for the selector, since 64-bit fxsave does not store it) against
+; the expected instruction address and its opcode bytes.
+;
+; @returns  ZF=1 on success, ZF=0 on failure.
+; @param    xSP + xCB   fxsave image followed by fnstenv.
+; @param    xCX         Opcode address (no prefixes).
+;
+CheckOpcodeCsIp:
+        push    xBP
+        mov     xBP, xSP
+        push    xAX
+
+        ; Check the IP.
+%ifdef RT_ARCH_AMD64
+        cmp     rcx, [xBP + xCB*2 + X86FXSTATE.FPUIP]
+%else
+        cmp     ecx, [xBP + xCB*2 + X86FXSTATE.FPUIP]
+%endif
+        jne     .failure1
+
+.check_fpucs:
+        mov     ax, cs
+        cmp     ax, [xBP + xCB*2 + 512 + X86FSTENV32P.FPUCS]
+        jne     .failure2
+
+        ; Check the opcode. This may be disabled.
+        ; FOP holds the low 11 bits of the two opcode bytes (first byte high).
+        mov     ah, [xCX]
+        mov     al, [xCX + 1]
+        and     ax, 07ffh
+
+        cmp     ax, [xBP + xCB*2 + X86FXSTATE.FOP]
+        je      .success
+        cmp     ax, [xBP + xCB*2 + 512 + X86FSTENV32P.FOP]
+        je      .success
+
+;        xor     ax, ax
+;        cmp     ax, [xBP + xCB*2 + X86FXSTATE.FOP]
+;        jne     .failure3
+
+.success:
+        xor     eax, eax                ; eax=0, sets ZF (success).
+.return:
+        pop     xAX
+        leave
+        ret
+
+.failure1:
+        ; AMD64 doesn't seem to store anything at IP and DP, so use the
+        ; fnstenv image instead even if that only contains the lower 32-bit.
+        xor     eax, eax
+        cmp     xAX, [xBP + xCB*2 + X86FXSTATE.FPUIP]
+        jne     .failure1_for_real
+        cmp     xAX, [xBP + xCB*2 + X86FXSTATE.FPUDP]
+        jne     .failure1_for_real
+        cmp     ecx, [xBP + xCB*2 + 512 + X86FSTENV32P.FPUIP]
+        je      .check_fpucs
+.failure1_for_real:
+        mov     eax, 10000000           ; FPUIP mismatch.
+        jmp     .failure
+.failure2:
+        mov     eax, 20000000           ; FPUCS mismatch.
+        jmp     .failure
+.failure3:
+        mov     eax, 30000000           ; FOP mismatch (currently disabled).
+        jmp     .failure
+.failure:
+        or      eax, eax                ; clear ZF (failure).
+        leave
+        ret
+
+;;
+; Checks a FPU instruction, no memory operand.
+;
+; Executes %1, saves the FPU state (fxsave + fnstenv for the selectors) at
+; [xSP], and verifies the recorded opcode and CS:IP via CheckOpcodeCsIp.
+; On mismatch eax gets a __LINE__-derived code and control jumps to the
+; enclosing procedure's .return label.
+;
+; @param    %1      The instruction to execute.
+; @uses     xCX, xAX, Stack.
+;
+%macro FpuCheckOpcodeCsIp 1
+        ; Zero the IP/DP fields so stale values cannot satisfy the check.
+        mov     dword [xSP + X86FXSTATE.FPUIP], 0
+        mov     dword [xSP + X86FXSTATE.FPUCS], 0
+        mov     dword [xSP + X86FXSTATE.FPUDP], 0
+        mov     dword [xSP + X86FXSTATE.FPUDS], 0
+%%instruction:
+        %1
+        arch_fxsave  [xSP]
+        fnstenv [xSP + 512]             ; for the selectors (64-bit)
+        arch_fxrstor [xSP]              ; fnstenv screws up the ES bit.
+        lea     xCX, [REF(%%instruction)]
+        call    CheckOpcodeCsIp
+        jz      %%ok
+        lea     xAX, [xAX + __LINE__]
+        jmp     .return
+%%ok:
+%endmacro
+
+
+;;
+; Checks a trapping FPU instruction, no memory operand.
+;
+; Executes %1, saves the FPU state, then triggers the pending #MF via fwait
+; (registered in TRAPINFO so the trap handler resumes at %%resume) and
+; verifies the recorded opcode and CS:IP via CheckOpcodeCsIp.
+;
+; Upon return, there are two FXSAVE images on the stack at xSP.
+;
+; @uses     xCX, xAX, Stack.
+;
+; @param    %1      The instruction.
+;
+%macro FpuTrapOpcodeCsIp 1
+        ; Zero the IP/DP fields in both save areas.
+        mov     dword [xSP + 1024 + 512 + X86FXSTATE.FPUIP], 0
+        mov     dword [xSP + 1024 + 512 + X86FXSTATE.FPUCS], 0
+        mov     dword [xSP + 1024 + 512 + X86FXSTATE.FPUDP], 0
+        mov     dword [xSP + 1024 + 512 + X86FXSTATE.FPUDS], 0
+        mov     dword [xSP + X86FXSTATE.FPUIP], 0
+        mov     dword [xSP + X86FXSTATE.FPUCS], 0
+        mov     dword [xSP + X86FXSTATE.FPUDP], 0
+        mov     dword [xSP + X86FXSTATE.FPUDS], 0
+%%instruction:
+        %1
+        fxsave  [xSP + 1024 +512]       ; FPUDS and FPUCS for 64-bit hosts.
+                                        ; WEIRD: When saved after FWAIT they are ZEROed! (64-bit Intel)
+        arch_fxsave  [xSP]
+        fnstenv [xSP + 512]
+        arch_fxrstor [xSP]
+%%trap:
+        fwait                           ; raises the pending #MF.
+%%trap_end:
+        mov     eax, __LINE__           ; only reached if fwait did NOT trap.
+        jmp     .return
+BEGINDATA
+%%trapinfo: istruc TRAPINFO
+        at TRAPINFO.uTrapPC,    RTCCPTR_DEF     %%trap
+        at TRAPINFO.uResumePC,  RTCCPTR_DEF     %%resume
+        at TRAPINFO.u8TrapNo,   db              X86_XCPT_MF
+        at TRAPINFO.cbInstr,    db              (%%trap_end - %%trap)
+iend
+BEGINCODE
+%%resume:
+        lea     xCX, [REF(%%instruction)]
+        call    CheckOpcodeCsIp
+        jz      %%ok
+        lea     xAX, [xAX + __LINE__]
+        jmp     .return
+%%ok:
+%endmacro
+
+
+
+
+;;
+; Checks the opcode, CS:IP and DS:DP of the FPU.
+;
+; Verifies the FPUDP/FPUDS fields here, then tail-jumps to CheckOpcodeCsIp
+; for the opcode and CS:IP part.
+;
+; @returns  ZF=1 on success, ZF=0+EAX on failure.
+; @param    xSP + xCB   fxsave image followed by fnstenv.
+; @param    xCX         Opcode address (no prefixes).
+; @param    xDX         Memory address (DS relative).
+;
+CheckOpcodeCsIpDsDp:
+        push    xBP
+        mov     xBP, xSP
+        push    xAX
+
+        ; Check the memory operand.
+%ifdef RT_ARCH_AMD64
+        cmp     rdx, [xBP + xCB*2 + X86FXSTATE.FPUDP]
+%else
+        cmp     edx, [xBP + xCB*2 + X86FXSTATE.FPUDP]
+%endif
+        jne     .failure1
+
+.check_fpuds:
+        mov     ax, ds
+        cmp     ax, [xBP + xCB*2 + 512 + X86FSTENV32P.FPUDS]
+        jne     .failure2
+
+.success:
+        pop     xAX
+        leave
+        ; Let CheckOpcodeCsIp do the rest.
+        jmp     CheckOpcodeCsIp
+
+.failure1:
+        ; AMD may leave all fields as ZERO in the FXSAVE image - figure
+        ; if there is a flag controlling this anywhere...
+        xor     eax, eax
+        cmp     xAX, [xBP + xCB*2 + X86FXSTATE.FPUDP]
+        jne     .failure1_for_real
+        cmp     xAX, [xBP + xCB*2 + X86FXSTATE.FPUIP]
+        jne     .failure1_for_real
+        cmp     edx, [xBP + xCB*2 + 512 + X86FSTENV32P.FPUDP]
+        je      .check_fpuds
+.failure1_for_real:
+        mov     eax, 60000000           ; FPUDP mismatch.
+        jmp     .failure
+.failure2:
+        mov     eax, 80000000           ; FPUDS mismatch.
+.failure:
+        or      eax, eax                ; clear ZF (failure).
+        leave
+        ret
+
+
+;;
+; Checks a FPU instruction taking a memory operand.
+;
+; Like FpuCheckOpcodeCsIp, but additionally verifies the recorded DS:DP
+; against the operand address %2 via CheckOpcodeCsIpDsDp.
+;
+; @param    %1      The instruction to execute.
+; @param    %2      Operand memory address (DS relative).
+; @uses     xCX, xDX, xAX, Stack.
+;
+%macro FpuCheckOpcodeCsIpDsDp 2
+        ; Zero the IP/DP fields so stale values cannot satisfy the check.
+        mov     dword [xSP + X86FXSTATE.FPUIP], 0
+        mov     dword [xSP + X86FXSTATE.FPUCS], 0
+        mov     dword [xSP + X86FXSTATE.FPUDP], 0
+        mov     dword [xSP + X86FXSTATE.FPUDS], 0
+%%instruction:
+        %1
+        arch_fxsave  [xSP]
+        fnstenv [xSP + 512]             ; for the selectors (64-bit)
+        arch_fxrstor [xSP]              ; fnstenv screws up the ES bit.
+        lea     xDX, %2
+        lea     xCX, [REF(%%instruction)]
+        call    CheckOpcodeCsIpDsDp
+        jz      %%ok
+        lea     xAX, [xAX + __LINE__]
+        jmp     .return
+%%ok:
+%endmacro
+
+
+;;
+; Checks a trapping FPU instruction taking a memory operand.
+;
+; Like FpuTrapOpcodeCsIp, but additionally verifies the recorded DS:DP
+; against the operand address %2 via CheckOpcodeCsIpDsDp.
+;
+; Upon return, there are two FXSAVE images on the stack at xSP.
+;
+; @uses     xCX, xDX, xAX, Stack.
+;
+; @param    %1      The instruction.
+; @param    %2      Operand memory address (DS relative).
+;
+%macro FpuTrapOpcodeCsIpDsDp 2
+        ; Zero the IP/DP fields so stale values cannot satisfy the check.
+        mov     dword [xSP + X86FXSTATE.FPUIP], 0
+        mov     dword [xSP + X86FXSTATE.FPUCS], 0
+        mov     dword [xSP + X86FXSTATE.FPUDP], 0
+        mov     dword [xSP + X86FXSTATE.FPUDS], 0
+%%instruction:
+        %1
+        fxsave  [xSP + 1024 +512]       ; FPUDS and FPUCS for 64-bit hosts.
+                                        ; WEIRD: When saved after FWAIT they are ZEROed! (64-bit Intel)
+        arch_fxsave  [xSP]
+        fnstenv [xSP + 512]
+        arch_fxrstor [xSP]
+%%trap:
+        fwait                           ; raises the pending #MF.
+%%trap_end:
+        mov     eax, __LINE__           ; only reached if fwait did NOT trap.
+        jmp     .return
+BEGINDATA
+%%trapinfo: istruc TRAPINFO
+        at TRAPINFO.uTrapPC,    RTCCPTR_DEF     %%trap
+        at TRAPINFO.uResumePC,  RTCCPTR_DEF     %%resume
+        at TRAPINFO.u8TrapNo,   db              X86_XCPT_MF
+        at TRAPINFO.cbInstr,    db              (%%trap_end - %%trap)
+iend
+BEGINCODE
+%%resume:
+        lea     xDX, %2
+        lea     xCX, [REF(%%instruction)]
+        call    CheckOpcodeCsIpDsDp
+        jz      %%ok
+        lea     xAX, [xAX + __LINE__]
+        jmp     .return
+%%ok:
+%endmacro
+
+
+;;
+; Checks that the FPU and GReg state is completely unchanged after an instruction
+; resulting in a CPU trap.
+;
+; Snapshots state, runs the trapping instruction via ShouldTrap, then compares
+; against the snapshot (including FOP/FPUIP).  On mismatch eax gets a
+; __LINE__-derived code and control jumps to the enclosing .return label.
+;
+; @param    1       The trap number.
+; @param    2+      The instruction which should trap.
+;
+%macro FpuCheckCpuTrapUnchangedState 2+
+        call    SaveFPUAndGRegsToStack
+        ShouldTrap %1, %2
+        call    CompareFPUAndGRegsOnStack
+        jz      %%ok
+        lea     xAX, [xAX + __LINE__]
+        jmp     .return
+%%ok:
+%endmacro
+
+
+;;
+; Initialize the FPU and set CW to %1.
+;
+; Also loads the unique SSE register values first so state comparisons start
+; from a known baseline.
+;
+; @param    %1      The new FPU control word value.
+; @uses     dword at [xSP].
+;
+%macro FpuInitWithCW 1
+        call    x861_LoadUniqueRegValuesSSE
+        fninit
+        mov     dword [xSP], %1
+        fldcw   [xSP]
+%endmacro
+
+
+;;
+; First bunch of FPU instruction tests.
+;
+;
+BEGINPROC x861_TestFPUInstr1
+ SAVE_ALL_PROLOGUE
+ sub xSP, 2048
+%if 0
+ ;
+ ; FDIV with 64-bit floating point memory operand.
+ ;
+ SetSubTest "FDIV m64r"
+
+ ; ## Normal operation. ##
+
+ fninit
+ FpuCheckOpcodeCsIpDsDp { fld dword [REF(g_r32_3dot2)] }, [REF(g_r32_3dot2)]
+ CheckSt0Value 0x00000000, 0xcccccd00, 0x4000
+ FpuCheckOpcodeCsIpDsDp { fdiv qword [REF(g_r64_One)] }, [REF(g_r64_One)]
+ FpuCheckFSW 0, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckSt0Value 0x00000000, 0xcccccd00, 0x4000
+
+
+ ; ## Masked exceptions. ##
+
+ ; Masked stack underflow.
+ fninit
+ FpuCheckOpcodeCsIpDsDp { fdiv qword [REF(g_r64_One)] }, [REF(g_r64_One)]
+ FpuCheckFSW X86_FSW_IE | X86_FSW_SF, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckSt0Value_QNaN
+
+ ; Masked zero divide.
+ fninit
+ FpuCheckOpcodeCsIpDsDp { fld dword [REF(g_r32_3dot2)] }, [REF(g_r32_3dot2)]
+ FpuCheckOpcodeCsIpDsDp { fdiv qword [REF(g_r64_Zero)] }, [REF(g_r64_Zero)]
+ FpuCheckFSW X86_FSW_ZE, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckSt0Value_PlusInf
+
+ ; Masked Inf/Inf.
+ fninit
+ FpuCheckOpcodeCsIpDsDp { fld dword [REF(g_r32_Inf)] }, [REF(g_r32_Inf)]
+ FpuCheckOpcodeCsIpDsDp { fdiv qword [REF(g_r64_Inf)] }, [REF(g_r64_Inf)]
+ FpuCheckFSW X86_FSW_IE, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckSt0Value_QNaN
+
+ ; Masked 0/0.
+ fninit
+ FpuCheckOpcodeCsIpDsDp { fld dword [REF(g_r32_Zero)] }, [REF(g_r32_Zero)]
+ FpuCheckOpcodeCsIpDsDp { fdiv qword [REF(g_r64_Zero)] }, [REF(g_r64_Zero)]
+ FpuCheckFSW X86_FSW_IE, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckSt0Value_QNaN
+
+ ; Masked precision exception, rounded down.
+ fninit
+ FpuCheckOpcodeCsIpDsDp { fld dword [REF(g_r32_Ten)] }, [REF(g_r32_Ten)]
+ FpuCheckOpcodeCsIpDsDp { fdiv qword [REF(g_r64_Three)] }, [REF(g_r64_Three)]
+ FpuCheckFSW X86_FSW_PE, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckSt0Value_3_and_a_3rd
+
+ ; Masked precision exception, rounded up.
+ fninit
+ FpuCheckOpcodeCsIpDsDp { fld dword [REF(g_r32_Eleven)] }, [REF(g_r32_Eleven)]
+ FpuCheckOpcodeCsIpDsDp { fdiv qword [REF(g_r64_Three)] }, [REF(g_r64_Three)]
+ FpuCheckFSW X86_FSW_PE | X86_FSW_C1, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckSt0Value_3_and_two_3rds
+
+ ; Masked overflow exception.
+ fninit
+ FpuCheckOpcodeCsIpDsDp { fld tword [REF(g_r80_Max)] }, [REF(g_r80_Max)]
+ FpuCheckOpcodeCsIpDsDp { fdiv qword [REF(g_r64_0dot1)] }, [REF(g_r64_0dot1)]
+ FpuCheckFSW X86_FSW_PE | X86_FSW_OE | X86_FSW_C1, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckSt0Value_PlusInf
+
+ ; Masked underflow exception.
+ fninit
+ FpuCheckOpcodeCsIpDsDp { fld tword [REF(g_r80_Min)] }, [REF(g_r80_Min)]
+ FpuCheckOpcodeCsIpDsDp { fdiv qword [REF(g_r64_Ten)] }, [REF(g_r64_Ten)]
+ FpuCheckFSW X86_FSW_PE | X86_FSW_UE | X86_FSW_C1, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckSt0Value 0xcccccccd, 0x0ccccccc, 0x0000
+
+ ; Denormal operand.
+ fninit
+ FpuCheckOpcodeCsIpDsDp { fld tword [REF(g_r80_One)] }, [REF(g_r80_One)]
+ FpuCheckOpcodeCsIpDsDp { fdiv qword [REF(g_r64_DnMax)] }, [REF(g_r64_DnMax)]
+ FxSaveCheckFSW xSP, X86_FSW_DE | X86_FSW_PE, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckSt0Value xSP, 0x00000800, 0x80000000, 0x43fd
+
+ ; ## Unmasked exceptions. ##
+
+ ; Stack underflow - TOP and ST0 unmodified.
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ FpuTrapOpcodeCsIpDsDp { fdiv qword [REF(g_r64_One)] }, [REF(g_r64_One)]
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_SF | X86_FSW_B | X86_FSW_ES, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckSt0EmptyInitValue xSP
+
+ ; Zero divide - Unmodified ST0.
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ FpuCheckOpcodeCsIpDsDp { fld dword [REF(g_r32_3dot2)] }, [REF(g_r32_3dot2)]
+ FpuTrapOpcodeCsIpDsDp { fdiv qword [REF(g_r64_Zero)] }, [REF(g_r64_Zero)]
+ FxSaveCheckFSW xSP, X86_FSW_ZE | X86_FSW_ES | X86_FSW_B, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckSt0ValueConst xSP, REF(g_r80_r32_3dot2)
+
+ ; Invalid Operand (Inf/Inf) - Unmodified ST0.
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ FpuCheckOpcodeCsIpDsDp { fld dword [REF(g_r32_Inf)] }, [REF(g_r32_Inf)]
+ FpuTrapOpcodeCsIpDsDp { fdiv qword [REF(g_r64_Inf)] }, [REF(g_r64_Inf)]
+ FpuCheckFSW X86_FSW_IE | X86_FSW_ES | X86_FSW_B, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckSt0ValueConst xSP, REF(g_r80_Inf)
+
+ ; Invalid Operand (0/0) - Unmodified ST0.
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ FpuCheckOpcodeCsIpDsDp { fld dword [REF(g_r32_Zero)] }, [REF(g_r32_Zero)]
+ FpuTrapOpcodeCsIpDsDp { fdiv qword [REF(g_r64_Zero)] }, [REF(g_r64_Zero)]
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_ES | X86_FSW_B, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckSt0ValueConst xSP, REF(g_r80_Zero)
+
+ ; Precision exception, rounded down.
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ FpuCheckOpcodeCsIpDsDp { fld dword [REF(g_r32_Ten)] }, [REF(g_r32_Ten)]
+ FpuTrapOpcodeCsIpDsDp { fdiv qword [REF(g_r64_Three)] }, [REF(g_r64_Three)]
+ FxSaveCheckFSW xSP, X86_FSW_PE | X86_FSW_ES | X86_FSW_B, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckSt0Value_3_and_a_3rd(xSP)
+
+ ; Precision exception, rounded up.
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ FpuCheckOpcodeCsIpDsDp { fld dword [REF(g_r32_Eleven)] }, [REF(g_r32_Eleven)]
+ FpuTrapOpcodeCsIpDsDp { fdiv qword [REF(g_r64_Three)] }, [REF(g_r64_Three)]
+ FxSaveCheckFSW xSP, X86_FSW_PE | X86_FSW_C1 | X86_FSW_ES | X86_FSW_B, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckSt0Value_3_and_two_3rds(xSP)
+
+ ; Overflow exception.
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ FpuCheckOpcodeCsIpDsDp { fld tword [REF(g_r80_Max)] }, [REF(g_r80_Max)]
+ FpuTrapOpcodeCsIpDsDp { fdiv qword [REF(g_r64_0dot1)] }, [REF(g_r64_0dot1)]
+ FxSaveCheckFSW xSP, X86_FSW_PE | X86_FSW_OE | X86_FSW_ES | X86_FSW_B, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckSt0Value xSP, 0xfffffd7f, 0x9fffffff, 0x2002
+
+ ; Underflow exception.
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ FpuCheckOpcodeCsIpDsDp { fld tword [REF(g_r80_Min)] }, [REF(g_r80_Min)]
+ FpuTrapOpcodeCsIpDsDp { fdiv qword [REF(g_r64_Ten)] }, [REF(g_r64_Ten)]
+ FxSaveCheckFSW xSP, X86_FSW_PE | X86_FSW_UE | X86_FSW_C1 | X86_FSW_ES | X86_FSW_B, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckSt0Value xSP, 0xcccccccd, 0xcccccccc, 0x5ffd
+
+ ; Denormal operand - Unmodified ST0.
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ FpuCheckOpcodeCsIpDsDp { fld tword [REF(g_r80_One)] }, [REF(g_r80_One)]
+ FpuTrapOpcodeCsIpDsDp { fdiv qword [REF(g_r64_DnMax)] }, [REF(g_r64_DnMax)]
+ FxSaveCheckFSW xSP, X86_FSW_DE | X86_FSW_ES | X86_FSW_B, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckSt0ValueConst xSP, REF(g_r80_One)
+
+ ;;; @todo exception priority checks.
+
+
+
+ ; ## A couple of variations on the #PF theme. ##
+
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ mov xBX, [REF_EXTERN(g_pbEfExecPage)]
+ FpuCheckCpuTrapUnchangedState X86_XCPT_PF, fdiv qword [xBX + PAGE_SIZE]
+
+ ; Check that a pending FPU exception takes precedence over a #PF.
+ fninit
+ fdiv qword [REF(g_r64_One)]
+ fstcw [xSP]
+ and word [xSP], ~(X86_FCW_IM)
+ fldcw [xSP]
+ mov xBX, [REF_EXTERN(g_pbEfExecPage)]
+ ShouldTrap X86_XCPT_MF, fdiv qword [xBX + PAGE_SIZE]
+
+ ;
+ ; FSUBRP STn, ST0
+ ;
+ SetSubTest "FSUBRP STn, ST0"
+
+ ; ## Normal operation. ##
+ fninit
+ FpuCheckOpcodeCsIpDsDp { fld dword [REF(g_r32_3dot2)] }, [REF(g_r32_3dot2)]
+ FpuCheckOpcodeCsIpDsDp { fld dword [REF(g_r32_3dot2)] }, [REF(g_r32_3dot2)]
+ FpuCheckOpcodeCsIp { fsubrp st1, st0 }
+ FxSaveCheckFSW xSP, 0, 0
+ FxSaveCheckSt0ValueConst xSP, REF(g_r80_Zero)
+
+ ; ## Masked exceptions. ##
+
+ ; Masked stack underflow, both operands.
+ fninit
+ FpuCheckOpcodeCsIp { fsubrp st1, st0 }
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_SF, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckSt0Value_QNaN(xSP)
+
+ ; Masked stack underflow, one operand.
+ fninit
+ FpuCheckOpcodeCsIpDsDp { fld dword [REF(g_r32_3dot2)] }, [REF(g_r32_3dot2)]
+ FpuCheckOpcodeCsIp { fsubrp st1, st0 }
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_SF, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckSt0Value_QNaN(xSP)
+
+ ; Denormal operand.
+ fninit
+ fld tword [REF(g_r80_DnMax)]
+ fld tword [REF(g_r80_DnMin)]
+ FpuCheckOpcodeCsIp { fsubrp st1, st0 }
+ FxSaveCheckFSW xSP, X86_FSW_DE, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckSt0Value xSP, 0xfffffffe, 0x7fffffff, 0x8000
+
+ ; ## Unmasked exceptions. ##
+
+ ; Stack underflow, both operands - no pop or change.
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ FpuTrapOpcodeCsIp { fsubrp st1, st0 }
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckSt0EmptyInitValue xSP
+
+ ; Stack underflow, one operand - no pop or change.
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ FpuCheckOpcodeCsIpDsDp { fld dword [REF(g_r32_3dot2)] }, [REF(g_r32_3dot2)]
+ FpuTrapOpcodeCsIp { fsubrp st1, st0 }
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckSt0ValueConst xSP, REF(g_r80_r32_3dot2)
+
+ ; Denormal operand - no pop.
+ fninit
+ fld tword [REF(g_r80_DnMax)]
+ fld tword [REF(g_r80_DnMin)]
+ fnclex
+ mov dword [xSP], X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ fldcw [xSP]
+ FpuTrapOpcodeCsIp { fsubrp st1, st0 }
+ FxSaveCheckFSW xSP, X86_FSW_DE | X86_FSW_ES | X86_FSW_B, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckStNValueConst xSP, 1, REF(g_r80_DnMax)
+ FxSaveCheckStNValueConst xSP, 0, REF(g_r80_DnMin)
+
+ ;
+ ; FSTP ST0, STn
+ ;
+ SetSubTest "FSTP ST0, STn"
+
+ ; ## Normal operation. ##
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ fld tword [REF(g_r80_0dot1)]
+ fld tword [REF(g_r80_3dot2)]
+ FpuCheckOpcodeCsIp { fstp st2 }
+ FxSaveCheckFSW xSP, 0, 0
+ FxSaveCheckStNValueConst xSP, 0, REF(g_r80_0dot1)
+ FxSaveCheckStNValueConst xSP, 1, REF(g_r80_3dot2)
+
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ fld tword [REF(g_r80_Max)]
+ fld tword [REF(g_r80_Inf)]
+ FpuCheckOpcodeCsIp { fstp st3 }
+ FxSaveCheckFSW xSP, 0, 0
+ FxSaveCheckStNValueConst xSP, 0, REF(g_r80_Max)
+ FxSaveCheckStNValueConst xSP, 2, REF(g_r80_Inf)
+
+ ; Denormal register values don't matter - the DE flag is not reasserted.
+ fninit
+ fld tword [REF(g_r80_DnMin)]
+ fld tword [REF(g_r80_DnMax)]
+ fnclex
+ mov dword [xSP], X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ fldcw [xSP]
+ FpuCheckOpcodeCsIp { fstp st2 }
+ FxSaveCheckFSW xSP, 0, 0
+ FxSaveCheckStNValueConst xSP, 0, REF(g_r80_DnMin)
+ FxSaveCheckStNValueConst xSP, 1, REF(g_r80_DnMax)
+
+ ; Signaled NaN doesn't matter.
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ fld tword [REF(g_r80_SNaN)]
+ fld tword [REF(g_r80_SNaN)]
+ fnclex
+ FpuCheckOpcodeCsIp { fstp st3 }
+ FxSaveCheckFSW xSP, 0, 0
+ FxSaveCheckStNValueConst xSP, 0, REF(g_r80_SNaN)
+ FxSaveCheckStNValueConst xSP, 2, REF(g_r80_SNaN)
+
+ ; Quiet NaN doesn't matter either
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ fld tword [REF(g_r80_QNaN)]
+ fld tword [REF(g_r80_QNaN)]
+ fnclex
+ FpuCheckOpcodeCsIp { fstp st4 }
+ FxSaveCheckFSW xSP, 0, 0
+ FxSaveCheckStNValueConst xSP, 0, REF(g_r80_QNaN)
+ FxSaveCheckStNValueConst xSP, 3, REF(g_r80_QNaN)
+
+ ; There is no overflow signalled.
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ fld tword [REF(g_r80_SNaNMax)]
+ fld tword [REF(g_r80_SNaNMax)]
+ fnclex
+ FpuCheckOpcodeCsIp { fstp st1 }
+ FxSaveCheckFSW xSP, 0, 0
+ FxSaveCheckStNValueConst xSP, 0, REF(g_r80_SNaNMax)
+
+ ; ## Masked exceptions. ##
+
+ ; Masked stack underflow.
+ fninit
+ FpuCheckOpcodeCsIp { fstp st1 }
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_SF, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckSt0Value_QNaN(xSP)
+
+ fninit
+ FpuCheckOpcodeCsIp { fstp st0 }
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_SF, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckSt0Empty xSP
+
+ ; ## Unmasked exceptions. ##
+
+ ; Stack underflow - no pop or change.
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ fld tword [REF(g_r80_0dot1)]
+ fld tword [REF(g_r80_3dot2)]
+ fld tword [REF(g_r80_Ten)]
+ ffree st0
+ FpuTrapOpcodeCsIp { fstp st1 }
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckSt0Empty xSP
+ FxSaveCheckStNValueConst xSP, 1, REF(g_r80_3dot2)
+ FxSaveCheckStNValueConst xSP, 2, REF(g_r80_0dot1)
+%endif
+
+ ;
+ ; FSTP M32R, ST0
+ ;
+ SetSubTest "FSTP M32R, ST0"
+
+ mov xBX, [REF_EXTERN(g_pbEfExecPage)]
+ lea xBX, [xBX + PAGE_SIZE - 4]
+
+ ; ## Normal operation. ##
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ fld dword [REF(g_r32_Ten)]
+ FpuCheckOpcodeCsIp { fstp dword [xBX] }
+ FxSaveCheckFSW xSP, 0, 0
+ FxSaveCheckSt0Empty xSP
+ CheckMemoryR32ValueConst xBX, REF(g_r32_Ten)
+
+ ; ## Masked exceptions. ##
+
+ ; Masked stack underflow.
+ fninit
+ FpuCheckOpcodeCsIp { fstp dword [xBX] }
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_SF, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckMemoryR32ValueConst xBX, REF(g_r32_NegQNaN)
+
+ fninit
+ fld tword [REF(g_r80_0dot1)]
+ fld tword [REF(g_r80_3dot2)]
+ fld tword [REF(g_r80_Ten)]
+ ffree st0
+ FpuCheckOpcodeCsIp { fstp dword [xBX] }
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_SF, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckMemoryR32ValueConst xBX, REF(g_r32_NegQNaN)
+ FxSaveCheckStNValueConst xSP, 0, REF(g_r80_3dot2)
+ FxSaveCheckStNValueConst xSP, 1, REF(g_r80_0dot1)
+
+ ; Masked #IA caused by SNaN.
+ fninit
+ fld tword [REF(g_r80_SNaN)]
+ FpuCheckOpcodeCsIp { fstp dword [xBX] }
+ FxSaveCheckFSW xSP, X86_FSW_IE, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckMemoryR32ValueConst xBX, REF(g_r32_QNaN)
+
+ ; Masked #U caused by a denormal value.
+ fninit
+ fld tword [REF(g_r80_DnMin)]
+ FpuCheckOpcodeCsIp { fstp dword [xBX] }
+ FxSaveCheckFSW xSP, X86_FSW_UE | X86_FSW_PE, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckMemoryR32ValueConst xBX, REF(g_r32_Zero)
+
+ ; Masked #P caused by a decimal value.
+ fninit
+ fld tword [REF(g_r80_3dot2)]
+ FpuCheckOpcodeCsIp { fstp dword [xBX] }
+ FxSaveCheckFSW xSP, X86_FSW_C1 | X86_FSW_PE, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckMemoryR32ValueConst xBX, REF(g_r32_3dot2)
+
+ ; ## Unmasked exceptions. ##
+
+ ; Stack underflow - nothing stored or popped.
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ mov dword [xBX], 0xffeeddcc
+ FpuTrapOpcodeCsIp { fstp dword [xBX] }
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckMemoryValue dword, xBX, 0xffeeddcc
+
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ fld tword [REF(g_r80_0dot1)]
+ fld tword [REF(g_r80_3dot2)]
+ fld tword [REF(g_r80_Ten)]
+ ffree st0
+ mov dword [xBX], 0xffeeddcc
+ FpuTrapOpcodeCsIp { fstp dword [xBX] }
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckMemoryValue dword, xBX, 0xffeeddcc
+ FxSaveCheckStNEmpty xSP, 0
+ FxSaveCheckStNValueConst xSP, 1, REF(g_r80_3dot2)
+ FxSaveCheckStNValueConst xSP, 2, REF(g_r80_0dot1)
+
+ ; #IA caused by SNaN.
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ fld tword [REF(g_r80_SNaN)]
+ mov dword [xBX], 0xffeeddcc
+ FpuTrapOpcodeCsIp { fstp dword [xBX] }
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_ES | X86_FSW_B, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckMemoryValue dword, xBX, 0xffeeddcc
+
+ ; #U caused by a denormal value - nothing written
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ fld tword [REF(g_r80_DnMin)]
+ mov dword [xBX], 0xffeeddcc
+ FpuTrapOpcodeCsIp { fstp dword [xBX] }
+ FxSaveCheckFSW xSP, X86_FSW_UE | X86_FSW_ES | X86_FSW_B, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckMemoryValue dword, xBX, 0xffeeddcc
+
+ ; #U caused by a small value - nothing written
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ fld tword [REF(g_r80_Min)]
+ mov dword [xBX], 0xffeeddcc
+ FpuTrapOpcodeCsIp { fstp dword [xBX] }
+ FxSaveCheckFSW xSP, X86_FSW_UE | X86_FSW_ES | X86_FSW_B, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckMemoryValue dword, xBX, 0xffeeddcc
+
+ ; #O caused by a large value - nothing written
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ fld tword [REF(g_r80_Max)]
+ mov dword [xBX], 0xffeeddcc
+ FpuTrapOpcodeCsIp { fstp dword [xBX] }
+ FxSaveCheckFSW xSP, X86_FSW_OE | X86_FSW_ES | X86_FSW_B, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckMemoryValue dword, xBX, 0xffeeddcc
+
+ ; #P caused by a decimal value - the rounded value is written just as if the exception was masked.
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ fld tword [REF(g_r80_3dot2)]
+ mov dword [xBX], 0xffeeddcc
+ FpuTrapOpcodeCsIp { fstp dword [xBX] }
+ FxSaveCheckFSW xSP, X86_FSW_C1 | X86_FSW_PE | X86_FSW_ES | X86_FSW_B, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckMemoryR32ValueConst xBX, REF(g_r32_3dot2)
+
+%if 0 ;; @todo implement me
+ ;
+ ; FISTP M32I, ST0
+ ;
+ SetSubTest "FISTP M32I, ST0"
+
+ mov xBX, [REF_EXTERN(g_pbEfExecPage)]
+ lea xBX, [xBX + PAGE_SIZE - 4]
+
+ ; ## Normal operation. ##
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ fld tword [REF(g_r80_Ten)]
+ FpuCheckOpcodeCsIp { fistp dword [xBX] }
+ FxSaveCheckFSW xSP, 0, 0
+ FxSaveCheckSt0Empty xSP
+ CheckMemoryValue dword, xBX, 10
+
+ ; ## Masked exceptions. ##
+
+ ; Masked stack underflow.
+ fninit
+ FpuCheckOpcodeCsIp { fistp dword [xBX] }
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_SF, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckMemoryValue dword, xBX, 0x80000000
+
+ fninit
+ fld tword [REF(g_r80_0dot1)]
+ fld tword [REF(g_r80_3dot2)]
+ fld tword [REF(g_r80_Ten)]
+ ffree st0
+ FpuCheckOpcodeCsIp { fistp dword [xBX] }
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_SF, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ CheckMemoryValue dword, xBX, 0x80000000
+ FxSaveCheckStNValueConst xSP, 0, REF(g_r80_3dot2)
+ FxSaveCheckStNValueConst xSP, 1, REF(g_r80_0dot1)
+
+ ; ## Unmasked exceptions. ##
+
+ ; Stack underflow - no pop or change.
+ FpuInitWithCW X86_FCW_PC_64 | X86_FCW_RC_NEAREST
+ fld tword [REF(g_r80_0dot1)]
+ fld tword [REF(g_r80_3dot2)]
+ fld tword [REF(g_r80_Ten)]
+ ffree st0
+ mov dword [xBX], 0xffeeddcc
+ FpuTrapOpcodeCsIp { fistp dword [xBX] }
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_SF | X86_FSW_ES | X86_FSW_B, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckSt0Empty xSP
+ CheckMemoryValue dword, xBX, 0xffeeddcc
+ FxSaveCheckStNValueConst xSP, 1, REF(g_r80_3dot2)
+ FxSaveCheckStNValueConst xSP, 2, REF(g_r80_0dot1)
+%endif
+%if 0
+ ;
+ ; FPTAN - calc, store ST0, push 1.0.
+ ;
+ SetSubTest "FPTAN"
+
+ ; ## Normal operation. ##
+ fninit
+ fldpi
+ FpuCheckOpcodeCsIp { fptan }
+ FxSaveCheckStNValueConst xSP, 0, REF(g_r80_One)
+ FxSaveCheckStNValue xSP, 1, 0x00000000, 0x80000000, 0x3fbf ; should be zero, so, this might fail due to precision later.
+
+ ; Masked stack underflow - two QNaNs.
+ fninit
+ FpuCheckOpcodeCsIp { fptan }
+ FxSaveCheckStNValueConst xSP, 0, REF(g_r80_NegQNaN)
+ FxSaveCheckStNValueConst xSP, 1, REF(g_r80_NegQNaN)
+
+ ; Masked stack overflow - two QNaNs
+ fninit
+ fldpi
+ fldpi
+ fldpi
+ fldpi
+ fldpi
+ fldpi
+ fldpi
+ fldpi
+ FpuCheckOpcodeCsIp { fptan }
+ FxSaveCheckStNValueConst xSP, 0, REF(g_r80_NegQNaN)
+ FxSaveCheckStNValueConst xSP, 1, REF(g_r80_NegQNaN)
+
+ ;; @todo Finish FPTAN testcase.
+
+ ;
+ ; FCMOVB - move if CF=1.
+ ;
+ SetSubTest "FCMOVB ST0,STn"
+
+ ; ## Normal operation. ##
+ fninit
+ fldz
+ fldpi
+ call SetFSW_C0_thru_C3
+ stc
+ FpuCheckOpcodeCsIp { fcmovb st0,st1 }
+ FxSaveCheckFSW xSP, X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3, 0 ; seems to be preserved...
+ FxSaveCheckStNValueConst xSP, 0, REF(g_r80_Zero)
+ FxSaveCheckStNValueConst xSP, 1, REF(g_r80_Zero)
+
+ fninit
+ fldz
+ fld1
+ call SetFSW_C0_thru_C3
+ clc
+ FpuCheckOpcodeCsIp { fcmovb st0,st1 }
+ FxSaveCheckFSW xSP, X86_FSW_C0 | X86_FSW_C1 | X86_FSW_C2 | X86_FSW_C3, 0 ; seems to be preserved...
+ FxSaveCheckStNValueConst xSP, 0, REF(g_r80_One)
+ FxSaveCheckStNValueConst xSP, 1, REF(g_r80_Zero)
+
+ ; ## Masked exceptions. ##
+
+ ; Masked stack underflow - both.
+ ; Note! #IE triggers regardless of the test result!
+ fninit
+ stc
+ FpuCheckOpcodeCsIp { fcmovb st0,st1 }
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_SF, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckStNValue_QNaN(xSP, 0)
+ FxSaveCheckStNEmpty xSP, 1
+
+ fninit
+ clc
+ FpuCheckOpcodeCsIp { fcmovb st0,st1 }
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_SF, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckStNValue_QNaN(xSP, 0)
+ FxSaveCheckStNEmpty xSP, 1
+
+ ; Masked stack underflow - source.
+ fninit
+ fldz
+ stc
+ FpuCheckOpcodeCsIp { fcmovb st0,st1 }
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_SF, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckStNValue_QNaN(xSP, 0)
+ FxSaveCheckStNEmpty xSP, 1
+
+ fninit
+ fldz
+ stc
+ FpuCheckOpcodeCsIp { fcmovb st0,st1 }
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_SF, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckStNValue_QNaN(xSP, 0)
+ FxSaveCheckStNEmpty xSP, 1
+
+ ; Masked stack underflow - destination.
+ fninit
+ fldz
+ fldpi
+ ffree st0
+ stc
+ FpuCheckOpcodeCsIp { fcmovb st0,st1 }
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_SF, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckStNValue_QNaN(xSP, 0)
+ FxSaveCheckStNValueConst xSP, 1, REF(g_r80_Zero)
+
+ fninit
+ fldz
+ fldpi
+ ffree st0
+ clc
+ FpuCheckOpcodeCsIp { fcmovb st0,st1 }
+ FxSaveCheckFSW xSP, X86_FSW_IE | X86_FSW_SF, X86_FSW_C0 | X86_FSW_C2 | X86_FSW_C3
+ FxSaveCheckStNValue_QNaN(xSP, 0)
+ FxSaveCheckStNValueConst xSP, 1, REF(g_r80_Zero)
+
+ ;; @todo Finish FCMOVB testcase.
+%endif
+
+
+.success:
+ xor eax, eax
+.return:
+ add xSP, 2048
+ SAVE_ALL_EPILOGUE
+ ret
+
+ENDPROC x861_TestFPUInstr1
+
+
+
+
+;;
+; Terminate the trap info array with a NIL entry.
+BEGINDATA
+GLOBALNAME g_aTrapInfoExecPage
+istruc TRAPINFO
+ at TRAPINFO.uTrapPC, RTCCPTR_DEF 1
+ at TRAPINFO.uResumePC, RTCCPTR_DEF 1
+ at TRAPINFO.u8TrapNo, db 16
+ at TRAPINFO.cbInstr, db 3
+iend
+GLOBALNAME g_aTrapInfoEnd
+istruc TRAPINFO
+ at TRAPINFO.uTrapPC, RTCCPTR_DEF 0
+ at TRAPINFO.uResumePC, RTCCPTR_DEF 0
+ at TRAPINFO.u8TrapNo, db 0
+ at TRAPINFO.cbInstr, db 0
+iend
+
diff --git a/src/VBox/VMM/testcase/tstX86-FpuSaveRestore.cpp b/src/VBox/VMM/testcase/tstX86-FpuSaveRestore.cpp
new file mode 100644
index 00000000..d9f44346
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstX86-FpuSaveRestore.cpp
@@ -0,0 +1,116 @@
+/* $Id: tstX86-FpuSaveRestore.cpp $ */
+/** @file
+ * tstX86-FpuSaveRestore - Experimenting with saving and restoring FPU.
+ */
+
+/*
+ * Copyright (C) 2013-2020 Oracle Corporation
+ *
+ * This file is part of VirtualBox Open Source Edition (OSE), as
+ * available from http://www.virtualbox.org. This file is free software;
+ * you can redistribute it and/or modify it under the terms of the GNU
+ * General Public License (GPL) as published by the Free Software
+ * Foundation, in version 2 as it comes in the "COPYING" file of the
+ * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+ * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+ */
+
+
+/*********************************************************************************************************************************
+* Header Files *
+*********************************************************************************************************************************/
+#include <iprt/initterm.h>
+#include <iprt/message.h>
+#include <iprt/string.h>
+#include <iprt/test.h>
+#include <iprt/x86.h>
+
+DECLASM(void) MyFpuPrepXcpt(void);
+DECLASM(void) MyFpuSave(PX86FXSTATE pState);
+DECLASM(void) MyFpuStoreEnv(PX86FSTENV32P pEnv);
+DECLASM(void) MyFpuRestore(PX86FXSTATE pState);
+DECLASM(void) MyFpuLoadEnv(PX86FSTENV32P pEnv);
+
+int main()
+{
+ RTTEST hTest;
+ int rc = RTTestInitAndCreate("tstX86-FpuSaveRestore", &hTest);
+ if (RT_FAILURE(rc))
+ return RTEXITCODE_FAILURE;
+ RTTestBanner(hTest);
+
+ RTTestSub(hTest, "CS/DS Selector");
+
+ RTTestIPrintf(RTTESTLVL_ALWAYS, "Initial state (0x20 will be subtracted from IP):\n");
+ /* Trigger an exception to make sure we've got something to look at. */
+ MyFpuPrepXcpt();
+ static X86FXSTATE FxState;
+ MyFpuSave(&FxState);
+ static X86FSTENV32P FpuEnv;
+ MyFpuStoreEnv(&FpuEnv);
+#ifdef RT_ARCH_AMD64
+ RTTestIPrintf(RTTESTLVL_ALWAYS, " FxState IP=%#06x%04x%08x\n", FxState.Rsrvd1, FxState.CS, FxState.FPUIP);
+#else
+ RTTestIPrintf(RTTESTLVL_ALWAYS, " FxState CS:IP=%#06x:%#010x\n", FxState.CS, FxState.FPUIP);
+#endif
+ RTTestIPrintf(RTTESTLVL_ALWAYS, " FpuEnv CS:IP=%#06x:%#010x\n", FpuEnv.FPUCS, FpuEnv.FPUIP);
+
+ /* Modify the state a little so we can tell the difference. */
+ static X86FXSTATE FxState2;
+ FxState2 = FxState;
+ FxState2.FPUIP -= 0x20;
+ static X86FSTENV32P FpuEnv2;
+ FpuEnv2 = FpuEnv;
+ FpuEnv2.FPUIP -= 0x20;
+
+ /* Just do FXRSTOR. */
+ RTTestIPrintf(RTTESTLVL_ALWAYS, "Just FXRSTOR:\n");
+ MyFpuRestore(&FxState2);
+
+ static X86FXSTATE FxStateJustRestore;
+ MyFpuSave(&FxStateJustRestore);
+ static X86FSTENV32P FpuEnvJustRestore;
+ MyFpuStoreEnv(&FpuEnvJustRestore);
+#ifdef RT_ARCH_AMD64
+ RTTestIPrintf(RTTESTLVL_ALWAYS, " FxState IP=%#06x%04x%08x\n", FxStateJustRestore.Rsrvd1, FxStateJustRestore.CS, FxStateJustRestore.FPUIP);
+#else
+ RTTestIPrintf(RTTESTLVL_ALWAYS, " FxState CS:IP=%#06x:%#010x\n", FxStateJustRestore.CS, FxStateJustRestore.FPUIP);
+#endif
+ RTTestIPrintf(RTTESTLVL_ALWAYS, " FpuEnv CS:IP=%#06x:%#010x\n", FpuEnvJustRestore.FPUCS, FpuEnvJustRestore.FPUIP);
+
+
+ /* FXRSTOR + FLDENV */
+ RTTestIPrintf(RTTESTLVL_ALWAYS, "FXRSTOR first, then FLDENV:\n");
+ MyFpuRestore(&FxState2);
+ MyFpuLoadEnv(&FpuEnv2);
+
+ static X86FXSTATE FxStateRestoreLoad;
+ MyFpuSave(&FxStateRestoreLoad);
+ static X86FSTENV32P FpuEnvRestoreLoad;
+ MyFpuStoreEnv(&FpuEnvRestoreLoad);
+#ifdef RT_ARCH_AMD64
+ RTTestIPrintf(RTTESTLVL_ALWAYS, " FxState IP=%#06x%04x%08x\n", FxStateRestoreLoad.Rsrvd1, FxStateRestoreLoad.CS, FxStateRestoreLoad.FPUIP);
+#else
+ RTTestIPrintf(RTTESTLVL_ALWAYS, " FxState CS:IP=%#06x:%#010x\n", FxStateRestoreLoad.CS, FxStateRestoreLoad.FPUIP);
+#endif
+ RTTestIPrintf(RTTESTLVL_ALWAYS, " FpuEnv CS:IP=%#06x:%#010x\n", FpuEnvRestoreLoad.FPUCS, FpuEnvRestoreLoad.FPUIP);
+
+ /* Reverse the order (FLDENV + FXRSTOR). */
+ RTTestIPrintf(RTTESTLVL_ALWAYS, "FLDENV first, then FXRSTOR:\n");
+ MyFpuLoadEnv(&FpuEnv2);
+ MyFpuRestore(&FxState2);
+
+ static X86FXSTATE FxStateLoadRestore;
+ MyFpuSave(&FxStateLoadRestore);
+ static X86FSTENV32P FpuEnvLoadRestore;
+ MyFpuStoreEnv(&FpuEnvLoadRestore);
+#ifdef RT_ARCH_AMD64
+ RTTestIPrintf(RTTESTLVL_ALWAYS, " FxState IP=%#06x%04x%08x\n", FxStateLoadRestore.Rsrvd1, FxStateLoadRestore.CS, FxStateLoadRestore.FPUIP);
+#else
+ RTTestIPrintf(RTTESTLVL_ALWAYS, " FxState CS:IP=%#06x:%#010x\n", FxStateLoadRestore.CS, FxStateLoadRestore.FPUIP);
+#endif
+ RTTestIPrintf(RTTESTLVL_ALWAYS, " FpuEnv CS:IP=%#06x:%#010x\n", FpuEnvLoadRestore.FPUCS, FpuEnvLoadRestore.FPUIP);
+
+
+ return RTTestSummaryAndDestroy(hTest);
+}
diff --git a/src/VBox/VMM/testcase/tstX86-FpuSaveRestoreA.asm b/src/VBox/VMM/testcase/tstX86-FpuSaveRestoreA.asm
new file mode 100644
index 00000000..d4f989b9
--- /dev/null
+++ b/src/VBox/VMM/testcase/tstX86-FpuSaveRestoreA.asm
@@ -0,0 +1,117 @@
+; $Id: tstX86-FpuSaveRestoreA.asm $
+;; @file
+; tstX86-FpuSaveRestore - Experimenting with saving and restoring FPU, assembly bits.
+;
+
+;
+; Copyright (C) 2013-2020 Oracle Corporation
+;
+; This file is part of VirtualBox Open Source Edition (OSE), as
+; available from http://www.virtualbox.org. This file is free software;
+; you can redistribute it and/or modify it under the terms of the GNU
+; General Public License (GPL) as published by the Free Software
+; Foundation, in version 2 as it comes in the "COPYING" file of the
+; VirtualBox OSE distribution. VirtualBox OSE is distributed in the
+; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
+;
+
+
+
+;*******************************************************************************
+;* Header Files *
+;*******************************************************************************
+%include "iprt/asmdefs.mac"
+%include "iprt/x86.mac"
+
+
+;*******************************************************************************
+;* Global Variables *
+;*******************************************************************************
+BEGINCODE
+g_r80_Zero: dt 0.0
+g_r80_One: dt 1.0
+
+
+BEGINCODE
+
+;; Prepares a FPU exception.
+BEGINPROC MyFpuPrepXcpt
+ fld tword [g_r80_One xWrtRIP]
+ fld tword [g_r80_Zero xWrtRIP]
+ fdiv st0
+ ret
+ENDPROC MyFpuPrepXcpt
+
+
+;; Same as above, just different address.
+BEGINPROC MyFpuPrepXcpt2
+ fld tword [g_r80_One xWrtRIP]
+ fld tword [g_r80_Zero xWrtRIP]
+ fdiv st0
+ ret
+ENDPROC MyFpuPrepXcpt2
+
+
+BEGINPROC MyFpuSave
+%ifdef ASM_CALL64_MSC
+ o64 fxsave [rcx]
+%elifdef ASM_CALL64_GCC
+ o64 fxsave [rdi]
+%elifdef RT_ARCH_X86
+ mov ecx, [esp + 4]
+ fxsave [ecx]
+%else
+ %error "Unsupported architecture."
+ bad arch
+%endif
+ ret
+ENDPROC MyFpuSave
+
+
+BEGINPROC MyFpuStoreEnv
+%ifdef ASM_CALL64_MSC
+ fstenv [rcx]
+%elifdef ASM_CALL64_GCC
+ fstenv [rdi]
+%elifdef RT_ARCH_X86
+ mov ecx, [esp + 4]
+ fstenv [ecx]
+%else
+ %error "Unsupported architecture."
+ bad arch
+%endif
+ ret
+ENDPROC MyFpuStoreEnv
+
+
+BEGINPROC MyFpuRestore
+%ifdef ASM_CALL64_MSC
+ o64 fxrstor [rcx]
+%elifdef ASM_CALL64_GCC
+ o64 fxrstor [rdi]
+%elifdef RT_ARCH_X86
+ mov ecx, [esp + 4]
+ fxrstor [ecx]
+%else
+ %error "Unsupported architecture."
+ bad arch
+%endif
+ ret
+ENDPROC MyFpuRestore
+
+
+BEGINPROC MyFpuLoadEnv
+%ifdef ASM_CALL64_MSC
+ fldenv [rcx]
+%elifdef ASM_CALL64_GCC
+ fldenv [rdi]
+%elifdef RT_ARCH_X86
+ mov ecx, [esp + 4]
+ fldenv [ecx]
+%else
+ %error "Unsupported architecture."
+ bad arch
+%endif
+ ret
+ENDPROC MyFpuLoadEnv
+