author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:17:27 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:17:27 +0000
commit     f215e02bf85f68d3a6106c2a1f4f7f063f819064
tree       6bb5b92c046312c4e95ac2620b10ddf482d3fa8b  /src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm
parent     Initial commit.
Adding upstream version 7.0.14-dfsg. (tag: upstream/7.0.14-dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm')
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c                  | 1160
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/CpuService.c             |  366
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/CpuService.h             |  175
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Cet.nasm            |   33
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/MpFuncs.nasm        |  153
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c           |  374
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Semaphore.c         |   42
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.nasm       |  314
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.nasm   |  705
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c      |  204
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.nasm        |   96
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c    |   74
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.h    |   91
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c              | 2027
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c         | 1470
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h         | 1479
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf       |  153
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.uni       |   15
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmmExtra.uni  |   12
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c | 1571
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmMp.c                  |  344
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmMp.h                  |  285
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c             | 1517
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.h             |  135
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfileInternal.h     |  167
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmramSaveState.c         |  742
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SyncTimer.c              |  110
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Cet.nasm             |   34
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/MpFuncs.nasm         |  189
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c            | 1325
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Semaphore.c          |   69
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm        |  396
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm    |  378
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c       |  218
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm         |  146
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c     |  333
-rw-r--r--  src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.h     |   99
37 files changed, 17001 insertions, 0 deletions
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
new file mode 100644
index 00000000..6df07ac6
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
@@ -0,0 +1,1160 @@
+/** @file
+Code for Processor S3 restoration
+
+Copyright (c) 2006 - 2021, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
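+//
+// Data exchanged between the BSP (C code) and the APs (startup assembly).
+// It is placed immediately after the AP startup code copied to the startup
+// vector (see PrepareApStartupVector()), so its packed layout must match the
+// offsets defined in Ia32/MpFuncs.nasm and X64/MpFuncs.nasm.
+//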
+#pragma pack(1)
+typedef struct {
+ UINTN Lock;
+ VOID *StackStart;
+ UINTN StackSize;
+ VOID *ApFunction;
+ IA32_DESCRIPTOR GdtrProfile;
+ IA32_DESCRIPTOR IdtrProfile;
+ UINT32 BufferStart;
+ UINT32 Cr3;
+ UINTN InitializeFloatingPointUnitsAddress;
+} MP_CPU_EXCHANGE_INFO;
+#pragma pack()
+
+typedef struct {
+ UINT8 *RendezvousFunnelAddress;
+ UINTN PModeEntryOffset;
+ UINTN FlatJumpOffset;
+ UINTN Size;
+ UINTN LModeEntryOffset;
+ UINTN LongJumpOffset;
+} MP_ASSEMBLY_ADDRESS_MAP;
+
+//
+// Flags used when programming the registers.
+//
+typedef struct {
+ volatile UINTN MemoryMappedLock; // Spinlock used to program mmio
+ volatile UINT32 *CoreSemaphoreCount; // Semaphore container used to program
+ // core level semaphore.
+ volatile UINT32 *PackageSemaphoreCount; // Semaphore container used to program
+ // package level semaphore.
+} PROGRAM_CPU_REGISTER_FLAGS;
+
+//
+// Signal that SMM BASE relocation is complete.
+//
+volatile BOOLEAN mInitApsAfterSmmBaseReloc;
+
+/**
+ Get starting address and size of the rendezvous entry for APs.
+ Information for fixing a jump instruction in the code is also returned.
+
+ @param AddressMap Output buffer for address map information.
+**/
+VOID *
+EFIAPI
+AsmGetAddressMap (
+ MP_ASSEMBLY_ADDRESS_MAP *AddressMap
+ );
+
+#define LEGACY_REGION_SIZE (2 * 0x1000)
+#define LEGACY_REGION_BASE (0xA0000 - LEGACY_REGION_SIZE)
+
+PROGRAM_CPU_REGISTER_FLAGS mCpuFlags;
+ACPI_CPU_DATA mAcpiCpuData;
+volatile UINT32 mNumberToFinish;
+MP_CPU_EXCHANGE_INFO *mExchangeInfo;
+BOOLEAN mRestoreSmmConfigurationInS3 = FALSE;
+
+//
+// S3 boot flag
+//
+BOOLEAN mSmmS3Flag = FALSE;
+
+//
+// Pointer to structure used during S3 Resume
+//
+SMM_S3_RESUME_STATE *mSmmS3ResumeState = NULL;
+
+BOOLEAN mAcpiS3Enable = TRUE;
+
+UINT8 *mApHltLoopCode = NULL;
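+//
+// Machine-code template each AP parks in at the end of S3 processing: it
+// atomically decrements the counter whose address is passed as the first
+// stack argument (mNumberToFinish; see InitializeAp()), then spins in a
+// cli/hlt loop.
+//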
+UINT8 mApHltLoopCodeTemplate[] = {
+ 0x8B, 0x44, 0x24, 0x04, // mov eax, dword ptr [esp+4]
+ 0xF0, 0xFF, 0x08, // lock dec dword ptr [eax]
+ 0xFA, // cli
+ 0xF4, // hlt
+ 0xEB, 0xFC // jmp $-2
+ };
+
+/**
+ Sync up the MTRR values for all processors.
+
+ @param MtrrTable Table holding fixed/variable MTRR values to be loaded.
+**/
+VOID
+EFIAPI
+LoadMtrrData (
+ EFI_PHYSICAL_ADDRESS MtrrTable
+ )
+{
+ MTRR_SETTINGS *MtrrSettings;
+
+ MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable;
+ MtrrSetAllMtrrs (MtrrSettings);
+}
+
+/**
+ Increment semaphore by 1.
+
+  @param Sem  Pointer to the 32-bit semaphore to increment.
+
+**/
+VOID
+S3ReleaseSemaphore (
+ IN OUT volatile UINT32 *Sem
+ )
+{
+ InterlockedIncrement (Sem);
+}
+
+/**
+ Decrement the semaphore by 1 if it is not zero.
+
+ Performs an atomic decrement operation for semaphore.
+ The compare exchange operation must be performed using
+ MP safe mechanisms.
+
+  @param Sem  Pointer to the 32-bit semaphore to decrement.
+
+**/
+VOID
+S3WaitForSemaphore (
+ IN OUT volatile UINT32 *Sem
+ )
+{
+ UINT32 Value;
+
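+  //
+  // Spin until the count is non-zero, then try to atomically decrement it;
+  // retry if another processor changed the count in the meantime.
+  //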
+ do {
+ Value = *Sem;
+ } while (Value == 0 ||
+ InterlockedCompareExchange32 (
+ Sem,
+ Value,
+ Value - 1
+ ) != Value);
+}
+
+/**
+ Read / write CR value.
+
+  @param[in]      CrIndex   The index of the CR to read or write.
+  @param[in]      Read      TRUE to read the CR, FALSE to write it.
+  @param[in,out]  CrValue   On read, receives the CR value; on write, supplies it.
+
+  @retval EFI_SUCCESS      The CR was read or written successfully.
+  @retval EFI_UNSUPPORTED  CrIndex does not identify a supported CR.
+**/
+EFI_STATUS
+ReadWriteCr (
+ IN UINT32 CrIndex,
+ IN BOOLEAN Read,
+ IN OUT UINTN *CrValue
+ )
+{
+ switch (CrIndex) {
+ case 0:
+ if (Read) {
+ *CrValue = AsmReadCr0 ();
+ } else {
+ AsmWriteCr0 (*CrValue);
+ }
+ break;
+ case 2:
+ if (Read) {
+ *CrValue = AsmReadCr2 ();
+ } else {
+ AsmWriteCr2 (*CrValue);
+ }
+ break;
+ case 3:
+ if (Read) {
+ *CrValue = AsmReadCr3 ();
+ } else {
+ AsmWriteCr3 (*CrValue);
+ }
+ break;
+ case 4:
+ if (Read) {
+ *CrValue = AsmReadCr4 ();
+ } else {
+ AsmWriteCr4 (*CrValue);
+ }
+ break;
+ default:
+    return EFI_UNSUPPORTED;
+ }
+
+ return EFI_SUCCESS;
+}
+
+/**
+ Initialize the CPU registers from a register table.
+
+  @param[in] RegisterTable The register table for this AP.
+  @param[in] ApLocation    AP location info for this AP.
+  @param[in] CpuStatus     CPU status info for this CPU.
+  @param[in] CpuFlags      Flags data structure used when programming the registers.
+
+ @note This service could be called by BSP/APs.
+**/
+VOID
+ProgramProcessorRegister (
+ IN CPU_REGISTER_TABLE *RegisterTable,
+ IN EFI_CPU_PHYSICAL_LOCATION *ApLocation,
+ IN CPU_STATUS_INFORMATION *CpuStatus,
+ IN PROGRAM_CPU_REGISTER_FLAGS *CpuFlags
+ )
+{
+ CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;
+ UINTN Index;
+ UINTN Value;
+ CPU_REGISTER_TABLE_ENTRY *RegisterTableEntryHead;
+ volatile UINT32 *SemaphorePtr;
+ UINT32 FirstThread;
+ UINT32 CurrentThread;
+ UINT32 CurrentCore;
+ UINTN ProcessorIndex;
+ UINT32 *ThreadCountPerPackage;
+ UINT8 *ThreadCountPerCore;
+ EFI_STATUS Status;
+ UINT64 CurrentValue;
+
+ //
+ // Traverse Register Table of this logical processor
+ //
+ RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
+
+ for (Index = 0; Index < RegisterTable->TableLength; Index++) {
+
+ RegisterTableEntry = &RegisterTableEntryHead[Index];
+
+ //
+ // Check the type of specified register
+ //
+ switch (RegisterTableEntry->RegisterType) {
+ //
+ // The specified register is Control Register
+ //
+ case ControlRegister:
+ Status = ReadWriteCr (RegisterTableEntry->Index, TRUE, &Value);
+ if (EFI_ERROR (Status)) {
+ break;
+ }
+ if (RegisterTableEntry->TestThenWrite) {
+ CurrentValue = BitFieldRead64 (
+ Value,
+ RegisterTableEntry->ValidBitStart,
+ RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
+ );
+ if (CurrentValue == RegisterTableEntry->Value) {
+ break;
+ }
+ }
+ Value = (UINTN) BitFieldWrite64 (
+ Value,
+ RegisterTableEntry->ValidBitStart,
+ RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
+ RegisterTableEntry->Value
+ );
+ ReadWriteCr (RegisterTableEntry->Index, FALSE, &Value);
+ break;
+ //
+ // The specified register is Model Specific Register
+ //
+ case Msr:
+ if (RegisterTableEntry->TestThenWrite) {
+ Value = (UINTN)AsmReadMsr64 (RegisterTableEntry->Index);
+ if (RegisterTableEntry->ValidBitLength >= 64) {
+ if (Value == RegisterTableEntry->Value) {
+ break;
+ }
+ } else {
+ CurrentValue = BitFieldRead64 (
+ Value,
+ RegisterTableEntry->ValidBitStart,
+ RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1
+ );
+ if (CurrentValue == RegisterTableEntry->Value) {
+ break;
+ }
+ }
+ }
+
+ //
+ // If this function is called to restore register setting after INIT signal,
+ // there is no need to restore MSRs in register table.
+ //
+ if (RegisterTableEntry->ValidBitLength >= 64) {
+ //
+ // If length is not less than 64 bits, then directly write without reading
+ //
+ AsmWriteMsr64 (
+ RegisterTableEntry->Index,
+ RegisterTableEntry->Value
+ );
+ } else {
+ //
+ // Set the bit section according to bit start and length
+ //
+ AsmMsrBitFieldWrite64 (
+ RegisterTableEntry->Index,
+ RegisterTableEntry->ValidBitStart,
+ RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
+ RegisterTableEntry->Value
+ );
+ }
+ break;
+ //
+ // MemoryMapped operations
+ //
+ case MemoryMapped:
+ AcquireSpinLock (&CpuFlags->MemoryMappedLock);
+ MmioBitFieldWrite32 (
+ (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
+ RegisterTableEntry->ValidBitStart,
+ RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
+ (UINT32)RegisterTableEntry->Value
+ );
+ ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
+ break;
+ //
+ // Enable or disable cache
+ //
+ case CacheControl:
+ //
+ // If value of the entry is 0, then disable cache. Otherwise, enable cache.
+ //
+ if (RegisterTableEntry->Value == 0) {
+ AsmDisableCache ();
+ } else {
+ AsmEnableCache ();
+ }
+ break;
+
+ case Semaphore:
+      // The semaphore logic works as below:
+ //
+ // V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
+ // P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
+ //
+      //  All threads (T0...Tn) wait at the P() line and then continue
+      //  running together.
+ //
+ //
+ // T0 T1 ... Tn
+ //
+ // V(0...n) V(0...n) ... V(0...n)
+ // n * P(0) n * P(1) ... n * P(n)
+ //
+ ASSERT (
+ (ApLocation != NULL) &&
+ (CpuStatus->ThreadCountPerPackage != 0) &&
+ (CpuStatus->ThreadCountPerCore != 0) &&
+ (CpuFlags->CoreSemaphoreCount != NULL) &&
+ (CpuFlags->PackageSemaphoreCount != NULL)
+ );
+ switch (RegisterTableEntry->Value) {
+ case CoreDepType:
+ SemaphorePtr = CpuFlags->CoreSemaphoreCount;
+ ThreadCountPerCore = (UINT8 *)(UINTN)CpuStatus->ThreadCountPerCore;
+
+ CurrentCore = ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core;
+ //
+      // Get the index of the first thread in the core to which the current thread belongs.
+ //
+ FirstThread = CurrentCore * CpuStatus->MaxThreadCount;
+ CurrentThread = FirstThread + ApLocation->Thread;
+
+ //
+      // Different cores may have different numbers of valid threads.
+      // Tracking exact thread indexes per core would make the logic much more
+      // complicated, so the driver simply records the maximum thread count
+      // across all cores and uses it as the expected thread count for every
+      // core.
+      // In the two steps below, the current thread first releases the
+      // semaphore for every thread slot in its core; some slots may not
+      // correspond to valid threads, which is harmless. Second, the current
+      // thread waits on its own semaphore once per valid thread in the core,
+      // because only the valid threads release this thread's semaphore.
+ //
+
+ //
+ // First Notify ALL THREADs in current Core that this thread is ready.
+ //
+ for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {
+ S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
+ }
+ //
+ // Second, check whether all VALID THREADs (not all threads) in current core are ready.
+ //
+ for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerCore[CurrentCore]; ProcessorIndex ++) {
+ S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
+ }
+ break;
+
+ case PackageDepType:
+ SemaphorePtr = CpuFlags->PackageSemaphoreCount;
+ ThreadCountPerPackage = (UINT32 *)(UINTN)CpuStatus->ThreadCountPerPackage;
+ //
+      // Get the index of the first thread in the package to which the current thread belongs.
+ //
+ FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
+ //
+      // Compute the global index of the current thread in the semaphore array.
+ //
+ CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;
+
+ //
+      // Different packages may have different numbers of valid threads.
+      // Tracking exact thread indexes per package would make the logic much
+      // more complicated, so the driver simply records the maximum thread
+      // count across all packages and uses it as the expected thread count
+      // for every package.
+      // In the two steps below, the current thread first releases the
+      // semaphore for every thread slot in its package; some slots may not
+      // correspond to valid threads, which is harmless. Second, the current
+      // thread waits on its own semaphore once per valid thread in the
+      // package, because only the valid threads release this thread's
+      // semaphore.
+ //
+
+ //
+ // First Notify ALL THREADS in current package that this thread is ready.
+ //
+ for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount; ProcessorIndex ++) {
+ S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
+ }
+ //
+ // Second, check whether VALID THREADS (not all threads) in current package are ready.
+ //
+ for (ProcessorIndex = 0; ProcessorIndex < ThreadCountPerPackage[ApLocation->Package]; ProcessorIndex ++) {
+ S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
+ }
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+/**
+
+  Set processor registers for one AP.
+
+  @param PreSmmRegisterTable  TRUE to program the pre-SMM-init register table;
+                              FALSE to program the normal register table.
+
+**/
+VOID
+SetRegister (
+ IN BOOLEAN PreSmmRegisterTable
+ )
+{
+ CPU_REGISTER_TABLE *RegisterTable;
+ CPU_REGISTER_TABLE *RegisterTables;
+ UINT32 InitApicId;
+ UINTN ProcIndex;
+ UINTN Index;
+
+ if (PreSmmRegisterTable) {
+ RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable;
+ } else {
+ RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable;
+ }
+ if (RegisterTables == NULL) {
+ return;
+ }
+
+ InitApicId = GetInitialApicId ();
+ RegisterTable = NULL;
+ ProcIndex = (UINTN)-1;
+ for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
+ if (RegisterTables[Index].InitialApicId == InitApicId) {
+ RegisterTable = &RegisterTables[Index];
+ ProcIndex = Index;
+ break;
+ }
+ }
+ ASSERT (RegisterTable != NULL);
+
+ if (mAcpiCpuData.ApLocation != 0) {
+ ProgramProcessorRegister (
+ RegisterTable,
+ (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)mAcpiCpuData.ApLocation + ProcIndex,
+ &mAcpiCpuData.CpuStatus,
+ &mCpuFlags
+ );
+ } else {
+ ProgramProcessorRegister (
+ RegisterTable,
+ NULL,
+ &mAcpiCpuData.CpuStatus,
+ &mCpuFlags
+ );
+ }
+}
+
+/**
+  AP initialization before and after SMBASE relocation in the S3 boot path.
+**/
+VOID
+InitializeAp (
+ VOID
+ )
+{
+ UINTN TopOfStack;
+ UINT8 Stack[128];
+
+ LoadMtrrData (mAcpiCpuData.MtrrTable);
+
+ SetRegister (TRUE);
+
+  //
+  // Count down the AP-completion counter with an atomic decrement.
+  //
+ InterlockedDecrement (&mNumberToFinish);
+
+ //
+ // Wait for BSP to signal SMM Base relocation done.
+ //
+ while (!mInitApsAfterSmmBaseReloc) {
+ CpuPause ();
+ }
+
+ ProgramVirtualWireMode ();
+ DisableLvtInterrupts ();
+
+ SetRegister (FALSE);
+
+  //
+  // Place the AP into the safe hlt-loop code, which counts the AP down with
+  // an atomic decrement as its final action.
+  //
+ TopOfStack = (UINTN) Stack + sizeof (Stack);
+ TopOfStack &= ~(UINTN) (CPU_STACK_ALIGNMENT - 1);
+ CopyMem ((VOID *) (UINTN) mApHltLoopCode, mApHltLoopCodeTemplate, sizeof (mApHltLoopCodeTemplate));
+ TransferApToSafeState ((UINTN)mApHltLoopCode, TopOfStack, (UINTN)&mNumberToFinish);
+}
+
+/**
+ Prepares startup vector for APs.
+
+ This function prepares startup vector for APs.
+
+ @param WorkingBuffer The address of the work buffer.
+**/
+VOID
+PrepareApStartupVector (
+ EFI_PHYSICAL_ADDRESS WorkingBuffer
+ )
+{
+ EFI_PHYSICAL_ADDRESS StartupVector;
+ MP_ASSEMBLY_ADDRESS_MAP AddressMap;
+
+ //
+ // Get the address map of startup code for AP,
+ // including code size, and offset of long jump instructions to redirect.
+ //
+ ZeroMem (&AddressMap, sizeof (AddressMap));
+ AsmGetAddressMap (&AddressMap);
+
+ StartupVector = WorkingBuffer;
+
+ //
+ // Copy AP startup code to startup vector, and then redirect the long jump
+ // instructions for mode switching.
+ //
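+  // The "+ 3" and "+ 2" byte offsets below skip the far-jump opcode and
+  // prefixes so that the 32-bit target operand embedded in the copied
+  // instruction is the field being patched.
+  //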
+ CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size);
+ *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset);
+ if (AddressMap.LongJumpOffset != 0) {
+ *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset);
+ }
+
+ //
+ // Get the start address of exchange data between BSP and AP.
+ //
+ mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size);
+ ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO));
+
+ CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR));
+ CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR));
+
+ mExchangeInfo->StackStart = (VOID *) (UINTN) mAcpiCpuData.StackAddress;
+ mExchangeInfo->StackSize = mAcpiCpuData.StackSize;
+ mExchangeInfo->BufferStart = (UINT32) StartupVector;
+ mExchangeInfo->Cr3 = (UINT32) (AsmReadCr3 ());
+ mExchangeInfo->InitializeFloatingPointUnitsAddress = (UINTN)InitializeFloatingPointUnits;
+}
+
+/**
+  The function is invoked before SMBASE relocation in the S3 boot path to
+  restore CPU status.
+
+  It performs the first-time microcode load and restores MTRRs for both the
+  BSP and the APs.
+
+**/
+VOID
+InitializeCpuBeforeRebase (
+ VOID
+ )
+{
+ LoadMtrrData (mAcpiCpuData.MtrrTable);
+
+ SetRegister (TRUE);
+
+ ProgramVirtualWireMode ();
+
+ PrepareApStartupVector (mAcpiCpuData.StartupVector);
+
+ if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
+ ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
+ } else {
+ ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
+ }
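+  //
+  // Every AP (all CPUs except the BSP) must check in by decrementing
+  // mNumberToFinish before InitializeCpuBeforeRebase() returns.
+  //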
+ mNumberToFinish = (UINT32)(mNumberOfCpus - 1);
+ mExchangeInfo->ApFunction = (VOID *) (UINTN) InitializeAp;
+
+ //
+  // Have the APs execute the pre-SMBASE-relocation code. Note: This flag is maintained across S3 boots.
+ //
+ mInitApsAfterSmmBaseReloc = FALSE;
+
+ //
+ // Send INIT IPI - SIPI to all APs
+ //
+ SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector);
+
+ while (mNumberToFinish > 0) {
+ CpuPause ();
+ }
+}
+
+/**
+  The function is invoked after SMBASE relocation in the S3 boot path to
+  restore CPU status.
+
+  It restores the configuration saved during the normal boot path for both
+  the BSP and the APs.
+
+**/
+VOID
+InitializeCpuAfterRebase (
+ VOID
+ )
+{
+ if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
+ ASSERT (mNumberOfCpus <= mAcpiCpuData.NumberOfCpus);
+ } else {
+ ASSERT (mNumberOfCpus == mAcpiCpuData.NumberOfCpus);
+ }
+ mNumberToFinish = (UINT32)(mNumberOfCpus - 1);
+
+ //
+  // Signal the APs that SMM base relocation is complete so they can continue their initialization.
+ //
+ mInitApsAfterSmmBaseReloc = TRUE;
+
+ //
+  // Register programming must not start until all APs have continued their
+  // initialization. This is required to support the semaphore mechanism in
+  // the register table: if a semaphore's dependence type is package scope,
+  // a thread waits for all APs in the same package to finish their tasks
+  // before programming the next register. If some APs had not yet started
+  // their tasks while the BSP was executing its own, the BSP thread would
+  // hang waiting for the other APs in its package.
+ //
+ SetRegister (FALSE);
+
+ while (mNumberToFinish > 0) {
+ CpuPause ();
+ }
+}
+
+/**
+ Restore SMM Configuration in S3 boot path.
+
+**/
+VOID
+RestoreSmmConfigurationInS3 (
+ VOID
+ )
+{
+ if (!mAcpiS3Enable) {
+ return;
+ }
+
+ //
+ // Restore SMM Configuration in S3 boot path.
+ //
+ if (mRestoreSmmConfigurationInS3) {
+ //
+    // Make sure gSmst is correct because the functions below may use it.
+ //
+ gSmst->SmmStartupThisAp = gSmmCpuPrivate->SmmCoreEntryContext.SmmStartupThisAp;
+ gSmst->CurrentlyExecutingCpu = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
+ gSmst->NumberOfCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
+ gSmst->CpuSaveStateSize = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveStateSize;
+ gSmst->CpuSaveState = gSmmCpuPrivate->SmmCoreEntryContext.CpuSaveState;
+
+ //
+ // Configure SMM Code Access Check feature if available.
+ //
+ ConfigSmmCodeAccessCheck ();
+
+ SmmCpuFeaturesCompleteSmmReadyToLock ();
+
+ mRestoreSmmConfigurationInS3 = FALSE;
+ }
+}
+
+/**
+ Perform SMM initialization for all processors in the S3 boot path.
+
+ For a native platform, MP initialization in the S3 boot path is also performed in this function.
+**/
+VOID
+EFIAPI
+SmmRestoreCpu (
+ VOID
+ )
+{
+ SMM_S3_RESUME_STATE *SmmS3ResumeState;
+ IA32_DESCRIPTOR Ia32Idtr;
+ IA32_DESCRIPTOR X64Idtr;
+ IA32_IDT_GATE_DESCRIPTOR IdtEntryTable[EXCEPTION_VECTOR_NUMBER];
+ EFI_STATUS Status;
+
+ DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n"));
+
+ mSmmS3Flag = TRUE;
+
+ //
+ // See if there is enough context to resume PEI Phase
+ //
+ if (mSmmS3ResumeState == NULL) {
+ DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
+ CpuDeadLoop ();
+ }
+
+ SmmS3ResumeState = mSmmS3ResumeState;
+ ASSERT (SmmS3ResumeState != NULL);
+
+ if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
+ //
+ // Save the IA32 IDT Descriptor
+ //
+ AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
+
+ //
+ // Setup X64 IDT table
+ //
+ ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32);
+ X64Idtr.Base = (UINTN) IdtEntryTable;
+ X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1);
+ AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr);
+
+ //
+ // Setup the default exception handler
+ //
+ Status = InitializeCpuExceptionHandlers (NULL);
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Initialize Debug Agent to support source level debug
+ //
+ InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL);
+ }
+
+ //
+ // Skip initialization if mAcpiCpuData is not valid
+ //
+ if (mAcpiCpuData.NumberOfCpus > 0) {
+ //
+ // First time microcode load and restore MTRRs
+ //
+ InitializeCpuBeforeRebase ();
+ }
+
+ //
+ // Restore SMBASE for BSP and all APs
+ //
+ SmmRelocateBases ();
+
+ //
+ // Skip initialization if mAcpiCpuData is not valid
+ //
+ if (mAcpiCpuData.NumberOfCpus > 0) {
+ //
+ // Restore MSRs for BSP and all APs
+ //
+ InitializeCpuAfterRebase ();
+ }
+
+ //
+ // Set a flag to restore SMM configuration in S3 path.
+ //
+ mRestoreSmmConfigurationInS3 = TRUE;
+
+ DEBUG (( EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs));
+ DEBUG (( EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint));
+ DEBUG (( EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1));
+ DEBUG (( EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2));
+ DEBUG (( EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer));
+
+ //
+ // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase
+ //
+ if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) {
+ DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n"));
+
+ SwitchStack (
+ (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint,
+ (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1,
+ (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2,
+ (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer
+ );
+ }
+
+ //
+ // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase
+ //
+ if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) {
+ DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n"));
+ //
+    // Disable the debug timer interrupt, since the IDT being restored is for IA32 and will not work in long mode.
+ //
+ SaveAndSetDebugTimerInterrupt (FALSE);
+ //
+ // Restore IA32 IDT table
+ //
+ AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr);
+ AsmDisablePaging64 (
+ SmmS3ResumeState->ReturnCs,
+ (UINT32)SmmS3ResumeState->ReturnEntryPoint,
+ (UINT32)SmmS3ResumeState->ReturnContext1,
+ (UINT32)SmmS3ResumeState->ReturnContext2,
+ (UINT32)SmmS3ResumeState->ReturnStackPointer
+ );
+ }
+
+ //
+  // Cannot resume PEI Phase
+ //
+ DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n"));
+ CpuDeadLoop ();
+}
+
+/**
+ Initialize SMM S3 resume state structure used during S3 Resume.
+
+ @param[in] Cr3 The base address of the page tables to use in SMM.
+
+**/
+VOID
+InitSmmS3ResumeState (
+ IN UINT32 Cr3
+ )
+{
+ VOID *GuidHob;
+ EFI_SMRAM_DESCRIPTOR *SmramDescriptor;
+ SMM_S3_RESUME_STATE *SmmS3ResumeState;
+ EFI_PHYSICAL_ADDRESS Address;
+ EFI_STATUS Status;
+
+ if (!mAcpiS3Enable) {
+ return;
+ }
+
+ GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid);
+ if (GuidHob == NULL) {
+ DEBUG ((
+ DEBUG_ERROR,
+ "ERROR:%a(): HOB(gEfiAcpiVariableGuid=%g) needed by S3 resume doesn't exist!\n",
+ __FUNCTION__,
+ &gEfiAcpiVariableGuid
+ ));
+ CpuDeadLoop ();
+ } else {
+ SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob);
+
+ DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor));
+ DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart));
+
+ SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart;
+ ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE));
+
+ mSmmS3ResumeState = SmmS3ResumeState;
+ SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst;
+
+ SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu;
+
+ SmmS3ResumeState->SmmS3StackSize = SIZE_32KB;
+ SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
+ if (SmmS3ResumeState->SmmS3StackBase == 0) {
+ SmmS3ResumeState->SmmS3StackSize = 0;
+ }
+
+ SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;
+ SmmS3ResumeState->SmmS3Cr3 = Cr3;
+ SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;
+
+ if (sizeof (UINTN) == sizeof (UINT64)) {
+ SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
+ }
+ if (sizeof (UINTN) == sizeof (UINT32)) {
+ SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32;
+ }
+
+ //
+ // Patch SmmS3ResumeState->SmmS3Cr3
+ //
+ InitSmmS3Cr3 ();
+ }
+
+ //
+ // Allocate safe memory in ACPI NVS for AP to execute hlt loop in
+ // protected mode on S3 path
+ //
+ Address = BASE_4GB - 1;
+ Status = gBS->AllocatePages (
+ AllocateMaxAddress,
+ EfiACPIMemoryNVS,
+ EFI_SIZE_TO_PAGES (sizeof (mApHltLoopCodeTemplate)),
+ &Address
+ );
+ ASSERT_EFI_ERROR (Status);
+ mApHltLoopCode = (UINT8 *) (UINTN) Address;
+}
+
+/**
+ Copy register table from non-SMRAM into SMRAM.
+
+ @param[in] DestinationRegisterTableList Points to destination register table.
+ @param[in] SourceRegisterTableList Points to source register table.
+ @param[in] NumberOfCpus Number of CPUs.
+
+**/
+VOID
+CopyRegisterTable (
+ IN CPU_REGISTER_TABLE *DestinationRegisterTableList,
+ IN CPU_REGISTER_TABLE *SourceRegisterTableList,
+ IN UINT32 NumberOfCpus
+ )
+{
+ UINTN Index;
+ CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;
+
+ CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
+ for (Index = 0; Index < NumberOfCpus; Index++) {
+ if (DestinationRegisterTableList[Index].TableLength != 0) {
+ DestinationRegisterTableList[Index].AllocatedSize = DestinationRegisterTableList[Index].TableLength * sizeof (CPU_REGISTER_TABLE_ENTRY);
+ RegisterTableEntry = AllocateCopyPool (
+ DestinationRegisterTableList[Index].AllocatedSize,
+ (VOID *)(UINTN)SourceRegisterTableList[Index].RegisterTableEntry
+ );
+ ASSERT (RegisterTableEntry != NULL);
+ DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
+ }
+ }
+}
+
+/**
+ Check whether the register table is empty or not.
+
+ @param[in] RegisterTable Point to the register table.
+ @param[in] NumberOfCpus Number of CPUs.
+
+ @retval TRUE The register table is empty.
+ @retval FALSE The register table is not empty.
+**/
+BOOLEAN
+IsRegisterTableEmpty (
+ IN CPU_REGISTER_TABLE *RegisterTable,
+ IN UINT32 NumberOfCpus
+ )
+{
+ UINTN Index;
+
+ if (RegisterTable != NULL) {
+ for (Index = 0; Index < NumberOfCpus; Index++) {
+ if (RegisterTable[Index].TableLength != 0) {
+ return FALSE;
+ }
+ }
+ }
+
+ return TRUE;
+}
+
+/**
+ Get ACPI CPU data.
+
+**/
+VOID
+GetAcpiCpuData (
+ VOID
+ )
+{
+ ACPI_CPU_DATA *AcpiCpuData;
+ IA32_DESCRIPTOR *Gdtr;
+ IA32_DESCRIPTOR *Idtr;
+ VOID *GdtForAp;
+ VOID *IdtForAp;
+ VOID *MachineCheckHandlerForAp;
+ CPU_STATUS_INFORMATION *CpuStatus;
+
+ if (!mAcpiS3Enable) {
+ return;
+ }
+
+ //
+  // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0
+ //
+ mAcpiCpuData.NumberOfCpus = 0;
+
+ //
+ // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM
+ //
+ AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress);
+  if (AcpiCpuData == NULL) {
+ return;
+ }
+
+ //
+ // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
+ //
+ CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData));
+
+ mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS));
+ ASSERT (mAcpiCpuData.MtrrTable != 0);
+
+ CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS));
+
+ mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
+ ASSERT (mAcpiCpuData.GdtrProfile != 0);
+
+ CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR));
+
+ mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR));
+ ASSERT (mAcpiCpuData.IdtrProfile != 0);
+
+ CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR));
+
+ if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus)) {
+ mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
+ ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0);
+
+ CopyRegisterTable (
+ (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable,
+ (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable,
+ mAcpiCpuData.NumberOfCpus
+ );
+ } else {
+ mAcpiCpuData.PreSmmInitRegisterTable = 0;
+ }
+
+ if (!IsRegisterTableEmpty ((CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable, mAcpiCpuData.NumberOfCpus)) {
+ mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
+ ASSERT (mAcpiCpuData.RegisterTable != 0);
+
+ CopyRegisterTable (
+ (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable,
+ (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable,
+ mAcpiCpuData.NumberOfCpus
+ );
+ } else {
+ mAcpiCpuData.RegisterTable = 0;
+ }
+
+ //
+ // Copy AP's GDT, IDT and Machine Check handler into SMRAM.
+ //
+ Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile;
+ Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile;
+
+ GdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize);
+ ASSERT (GdtForAp != NULL);
+ IdtForAp = (VOID *) ((UINTN)GdtForAp + (Gdtr->Limit + 1));
+ MachineCheckHandlerForAp = (VOID *) ((UINTN)IdtForAp + (Idtr->Limit + 1));
+
+ CopyMem (GdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1);
+ CopyMem (IdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1);
+ CopyMem (MachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize);
+
+ Gdtr->Base = (UINTN)GdtForAp;
+ Idtr->Base = (UINTN)IdtForAp;
+ mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;
+
+ CpuStatus = &mAcpiCpuData.CpuStatus;
+ CopyMem (CpuStatus, &AcpiCpuData->CpuStatus, sizeof (CPU_STATUS_INFORMATION));
+ if (AcpiCpuData->CpuStatus.ThreadCountPerPackage != 0) {
+ CpuStatus->ThreadCountPerPackage = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
+ sizeof (UINT32) * CpuStatus->PackageCount,
+ (UINT32 *)(UINTN)AcpiCpuData->CpuStatus.ThreadCountPerPackage
+ );
+ ASSERT (CpuStatus->ThreadCountPerPackage != 0);
+ }
+  if (AcpiCpuData->CpuStatus.ThreadCountPerCore != 0) {
+    CpuStatus->ThreadCountPerCore = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
+                                      sizeof (UINT8) * (CpuStatus->PackageCount * CpuStatus->MaxCoreCount),
+                                      (UINT8 *)(UINTN)AcpiCpuData->CpuStatus.ThreadCountPerCore
+                                      );
+    ASSERT (CpuStatus->ThreadCountPerCore != 0);
+  }
+ if (AcpiCpuData->ApLocation != 0) {
+ mAcpiCpuData.ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
+ mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),
+ (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)AcpiCpuData->ApLocation
+ );
+ ASSERT (mAcpiCpuData.ApLocation != 0);
+ }
+ if (CpuStatus->PackageCount != 0) {
+ mCpuFlags.CoreSemaphoreCount = AllocateZeroPool (
+ sizeof (UINT32) * CpuStatus->PackageCount *
+ CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
+ );
+ ASSERT (mCpuFlags.CoreSemaphoreCount != NULL);
+ mCpuFlags.PackageSemaphoreCount = AllocateZeroPool (
+ sizeof (UINT32) * CpuStatus->PackageCount *
+ CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount
+ );
+ ASSERT (mCpuFlags.PackageSemaphoreCount != NULL);
+ }
+ InitializeSpinLock((SPIN_LOCK*) &mCpuFlags.MemoryMappedLock);
+}
+
+/**
+ Get ACPI S3 enable flag.
+
+**/
+VOID
+GetAcpiS3EnableFlag (
+ VOID
+ )
+{
+ mAcpiS3Enable = PcdGetBool (PcdAcpiS3Enable);
+}
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/CpuService.c b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/CpuService.c
new file mode 100644
index 00000000..79ff0ea3
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/CpuService.c
@@ -0,0 +1,366 @@
+/** @file
+Implementation of SMM CPU Services Protocol.
+
+Copyright (c) 2011 - 2015, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+//
+// SMM CPU Service Protocol instance
+//
+EFI_SMM_CPU_SERVICE_PROTOCOL mSmmCpuService = {
+ SmmGetProcessorInfo,
+ SmmSwitchBsp,
+ SmmAddProcessor,
+ SmmRemoveProcessor,
+ SmmWhoAmI,
+ SmmRegisterExceptionHandler
+};
+
+/**
+ Gets processor information on the requested processor at the instant this call is made.
+
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+ @param[in] ProcessorNumber The handle number of processor.
+ @param[out] ProcessorInfoBuffer A pointer to the buffer where information for
+ the requested processor is deposited.
+
+ @retval EFI_SUCCESS Processor information was returned.
+ @retval EFI_INVALID_PARAMETER ProcessorInfoBuffer is NULL.
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid.
+ @retval EFI_NOT_FOUND The processor with the handle specified by
+ ProcessorNumber does not exist in the platform.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmGetProcessorInfo (
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ IN UINTN ProcessorNumber,
+ OUT EFI_PROCESSOR_INFORMATION *ProcessorInfoBuffer
+ )
+{
+ //
+ // Check parameter
+ //
+ if (ProcessorNumber >= mMaxNumberOfCpus || ProcessorInfoBuffer == NULL) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ if (gSmmCpuPrivate->ProcessorInfo[ProcessorNumber].ProcessorId == INVALID_APIC_ID) {
+ return EFI_NOT_FOUND;
+ }
+
+ //
+ // Fill in processor information
+ //
+ CopyMem (ProcessorInfoBuffer, &gSmmCpuPrivate->ProcessorInfo[ProcessorNumber], sizeof (EFI_PROCESSOR_INFORMATION));
+ return EFI_SUCCESS;
+}
+
+/**
+  This service switches the requested AP to become the BSP starting from the next SMI.
+
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+ @param[in] ProcessorNumber The handle number of AP that is to become the new BSP.
+
+ @retval EFI_SUCCESS BSP will be switched in next SMI.
+ @retval EFI_UNSUPPORTED Switching the BSP or a processor to be hot-removed is not supported.
+ @retval EFI_NOT_FOUND The processor with the handle specified by ProcessorNumber does not exist.
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid.
+**/
+EFI_STATUS
+EFIAPI
+SmmSwitchBsp (
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ IN UINTN ProcessorNumber
+ )
+{
+ //
+ // Check parameter
+ //
+ if (ProcessorNumber >= mMaxNumberOfCpus) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ if (gSmmCpuPrivate->ProcessorInfo[ProcessorNumber].ProcessorId == INVALID_APIC_ID) {
+ return EFI_NOT_FOUND;
+ }
+
+ if (gSmmCpuPrivate->Operation[ProcessorNumber] != SmmCpuNone ||
+ gSmst->CurrentlyExecutingCpu == ProcessorNumber) {
+ return EFI_UNSUPPORTED;
+ }
+
+ //
+ // Setting of the BSP for next SMI is pending until all SMI handlers are finished
+ //
+ gSmmCpuPrivate->Operation[ProcessorNumber] = SmmCpuSwitchBsp;
+ return EFI_SUCCESS;
+}
+
+/**
+ Notify that a processor was hot-added.
+
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+ @param[in] ProcessorId Local APIC ID of the hot-added processor.
+ @param[out] ProcessorNumber The handle number of the hot-added processor.
+
+ @retval EFI_SUCCESS The hot-addition of the specified processors was successfully notified.
+ @retval EFI_UNSUPPORTED Hot addition of processor is not supported.
+ @retval EFI_NOT_FOUND The processor with the handle specified by ProcessorNumber does not exist.
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid.
+ @retval EFI_ALREADY_STARTED The processor is already online in the system.
+**/
+EFI_STATUS
+EFIAPI
+SmmAddProcessor (
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ IN UINT64 ProcessorId,
+ OUT UINTN *ProcessorNumber
+ )
+{
+ UINTN Index;
+
+ if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
+ return EFI_UNSUPPORTED;
+ }
+
+ //
+ // Check parameter
+ //
+ if (ProcessorNumber == NULL || ProcessorId == INVALID_APIC_ID) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ //
+ // Check if the processor already exists
+ //
+
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == ProcessorId) {
+ return EFI_ALREADY_STARTED;
+ }
+ }
+
+ //
+ // Check CPU hot plug data. The CPU RAS handler should have created the mapping
+ // of the APIC ID to SMBASE.
+ //
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ if (mCpuHotPlugData.ApicId[Index] == ProcessorId &&
+ gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
+ gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = ProcessorId;
+ gSmmCpuPrivate->ProcessorInfo[Index].StatusFlag = 0;
+ GetProcessorLocationByApicId (
+ (UINT32)ProcessorId,
+ &gSmmCpuPrivate->ProcessorInfo[Index].Location.Package,
+ &gSmmCpuPrivate->ProcessorInfo[Index].Location.Core,
+ &gSmmCpuPrivate->ProcessorInfo[Index].Location.Thread
+ );
+
+ *ProcessorNumber = Index;
+ gSmmCpuPrivate->Operation[Index] = SmmCpuAdd;
+ return EFI_SUCCESS;
+ }
+ }
+
+ return EFI_INVALID_PARAMETER;
+}
+
+/**
+ Notify that a processor was hot-removed.
+
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+  @param[in] ProcessorNumber The handle number of the processor to be hot-removed.
+
+ @retval EFI_SUCCESS The hot-removal of the specified processors was successfully notified.
+ @retval EFI_UNSUPPORTED Hot removal of processor is not supported.
+ @retval EFI_UNSUPPORTED Hot removal of BSP is not supported.
+ @retval EFI_UNSUPPORTED Hot removal of a processor with pending hot-plug operation is not supported.
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid.
+**/
+EFI_STATUS
+EFIAPI
+SmmRemoveProcessor (
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ IN UINTN ProcessorNumber
+ )
+{
+ if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
+ return EFI_UNSUPPORTED;
+ }
+
+ //
+ // Check parameter
+ //
+ if (ProcessorNumber >= mMaxNumberOfCpus ||
+ gSmmCpuPrivate->ProcessorInfo[ProcessorNumber].ProcessorId == INVALID_APIC_ID) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ //
+ // Can't remove BSP
+ //
+ if (ProcessorNumber == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
+ return EFI_UNSUPPORTED;
+ }
+
+ if (gSmmCpuPrivate->Operation[ProcessorNumber] != SmmCpuNone) {
+ return EFI_UNSUPPORTED;
+ }
+
+ gSmmCpuPrivate->ProcessorInfo[ProcessorNumber].ProcessorId = INVALID_APIC_ID;
+ mCpuHotPlugData.ApicId[ProcessorNumber] = INVALID_APIC_ID;
+
+ //
+ // Removal of the processor from the CPU list is pending until all SMI handlers are finished
+ //
+ gSmmCpuPrivate->Operation[ProcessorNumber] = SmmCpuRemove;
+ return EFI_SUCCESS;
+}
+
+/**
+  This service returns the handle number of the calling processor.
+
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+ @param[out] ProcessorNumber The handle number of currently executing processor.
+
+ @retval EFI_SUCCESS The current processor handle number was returned
+ in ProcessorNumber.
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is NULL.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmWhoAmI (
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ OUT UINTN *ProcessorNumber
+ )
+{
+ UINTN Index;
+ UINT64 ApicId;
+
+ //
+ // Check parameter
+ //
+ if (ProcessorNumber == NULL) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ ApicId = GetApicId ();
+
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == ApicId) {
+ *ProcessorNumber = Index;
+ return EFI_SUCCESS;
+ }
+ }
+ //
+ // This should not happen
+ //
+ ASSERT (FALSE);
+ return EFI_NOT_FOUND;
+}
+
+/**
+ Update the SMM CPU list per the pending operation.
+
+ This function is called after return from SMI handlers.
+**/
+VOID
+SmmCpuUpdate (
+ VOID
+ )
+{
+ UINTN Index;
+
+ //
+ // Handle pending BSP switch operations
+ //
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ if (gSmmCpuPrivate->Operation[Index] == SmmCpuSwitchBsp) {
+ gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
+ mSmmMpSyncData->SwitchBsp = TRUE;
+ mSmmMpSyncData->CandidateBsp[Index] = TRUE;
+ }
+ }
+
+ //
+ // Handle pending hot-add operations
+ //
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ if (gSmmCpuPrivate->Operation[Index] == SmmCpuAdd) {
+ gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
+ mNumberOfCpus++;
+ }
+ }
+
+ //
+ // Handle pending hot-remove operations
+ //
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
+ gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
+ mNumberOfCpus--;
+ }
+ }
+}
+
+/**
+ Register exception handler.
+
+ @param This A pointer to the SMM_CPU_SERVICE_PROTOCOL instance.
+ @param ExceptionType Defines which interrupt or exception to hook. Type EFI_EXCEPTION_TYPE and
+ the valid values for this parameter are defined in EFI_DEBUG_SUPPORT_PROTOCOL
+ of the UEFI 2.0 specification.
+ @param InterruptHandler A pointer to a function of type EFI_CPU_INTERRUPT_HANDLER
+ that is called when a processor interrupt occurs.
+ If this parameter is NULL, then the handler will be uninstalled.
+
+ @retval EFI_SUCCESS The handler for the processor interrupt was successfully installed or uninstalled.
+ @retval EFI_ALREADY_STARTED InterruptHandler is not NULL, and a handler for InterruptType was previously installed.
+ @retval EFI_INVALID_PARAMETER InterruptHandler is NULL, and a handler for InterruptType was not previously installed.
+ @retval EFI_UNSUPPORTED The interrupt specified by InterruptType is not supported.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmRegisterExceptionHandler (
+ IN EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ IN EFI_EXCEPTION_TYPE ExceptionType,
+ IN EFI_CPU_INTERRUPT_HANDLER InterruptHandler
+ )
+{
+ return RegisterCpuInterruptHandler (ExceptionType, InterruptHandler);
+}
+
+/**
+ Initialize SMM CPU Services.
+
+ It installs EFI SMM CPU Services Protocol.
+
+  @param Handle The firmware allocated handle for the EFI image.
+
+ @retval EFI_SUCCESS EFI SMM CPU Services Protocol was installed successfully.
+**/
+EFI_STATUS
+InitializeSmmCpuServices (
+ IN EFI_HANDLE Handle
+ )
+{
+ EFI_STATUS Status;
+
+ Status = gSmst->SmmInstallProtocolInterface (
+ &Handle,
+ &gEfiSmmCpuServiceProtocolGuid,
+ EFI_NATIVE_INTERFACE,
+ &mSmmCpuService
+ );
+ ASSERT_EFI_ERROR (Status);
+ return Status;
+}
+
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/CpuService.h b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/CpuService.h
new file mode 100644
index 00000000..efe90ffe
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/CpuService.h
@@ -0,0 +1,175 @@
+/** @file
+Include file for SMM CPU Services protocol implementation.
+
+Copyright (c) 2011 - 2015, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef _CPU_SERVICE_H_
+#define _CPU_SERVICE_H_
+
+typedef enum {
+ SmmCpuNone,
+ SmmCpuAdd,
+ SmmCpuRemove,
+ SmmCpuSwitchBsp
+} SMM_CPU_OPERATION;
+
+//
+// SMM CPU Service Protocol function prototypes.
+//
+
+/**
+ Gets processor information on the requested processor at the instant this call is made.
+
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+ @param[in] ProcessorNumber The handle number of processor.
+ @param[out] ProcessorInfoBuffer A pointer to the buffer where information for
+ the requested processor is deposited.
+
+ @retval EFI_SUCCESS Processor information was returned.
+ @retval EFI_INVALID_PARAMETER ProcessorInfoBuffer is NULL.
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid.
+ @retval EFI_NOT_FOUND The processor with the handle specified by
+ ProcessorNumber does not exist in the platform.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmGetProcessorInfo (
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ IN UINTN ProcessorNumber,
+ OUT EFI_PROCESSOR_INFORMATION *ProcessorInfoBuffer
+ );
+
+/**
+  This service switches the requested AP to become the BSP starting from the next SMI.
+
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+ @param[in] ProcessorNumber The handle number of AP that is to become the new BSP.
+
+ @retval EFI_SUCCESS BSP will be switched in next SMI.
+ @retval EFI_UNSUPPORTED Switching the BSP or a processor to be hot-removed is not supported.
+ @retval EFI_NOT_FOUND The processor with the handle specified by ProcessorNumber does not exist.
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid.
+**/
+EFI_STATUS
+EFIAPI
+SmmSwitchBsp (
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ IN UINTN ProcessorNumber
+ );
+
+/**
+ Notify that a processor was hot-added.
+
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+ @param[in] ProcessorId Local APIC ID of the hot-added processor.
+ @param[out] ProcessorNumber The handle number of the hot-added processor.
+
+ @retval EFI_SUCCESS The hot-addition of the specified processors was successfully notified.
+ @retval EFI_UNSUPPORTED Hot addition of processor is not supported.
+ @retval EFI_NOT_FOUND The processor with the handle specified by ProcessorNumber does not exist.
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid.
+ @retval EFI_ALREADY_STARTED The processor is already online in the system.
+**/
+EFI_STATUS
+EFIAPI
+SmmAddProcessor (
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ IN UINT64 ProcessorId,
+ OUT UINTN *ProcessorNumber
+ );
+
+/**
+ Notify that a processor was hot-removed.
+
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+  @param[in] ProcessorNumber The handle number of the processor to be hot-removed.
+
+ @retval EFI_SUCCESS The hot-removal of the specified processors was successfully notified.
+ @retval EFI_UNSUPPORTED Hot removal of processor is not supported.
+ @retval EFI_UNSUPPORTED Hot removal of BSP is not supported.
+ @retval EFI_UNSUPPORTED Hot removal of a processor with pending hot-plug operation is not supported.
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid.
+**/
+EFI_STATUS
+EFIAPI
+SmmRemoveProcessor (
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ IN UINTN ProcessorNumber
+ );
+
+/**
+  This service returns the handle number of the calling processor.
+
+ @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+ @param[out] ProcessorNumber The handle number of currently executing processor.
+
+ @retval EFI_SUCCESS The current processor handle number was returned
+ in ProcessorNumber.
+ @retval EFI_INVALID_PARAMETER ProcessorNumber is NULL.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmWhoAmI (
+ IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ OUT UINTN *ProcessorNumber
+ );
+
+/**
+ Register exception handler.
+
+ @param This A pointer to the SMM_CPU_SERVICE_PROTOCOL instance.
+ @param ExceptionType Defines which interrupt or exception to hook. Type EFI_EXCEPTION_TYPE and
+ the valid values for this parameter are defined in EFI_DEBUG_SUPPORT_PROTOCOL
+ of the UEFI 2.0 specification.
+ @param InterruptHandler A pointer to a function of type EFI_CPU_INTERRUPT_HANDLER
+ that is called when a processor interrupt occurs.
+ If this parameter is NULL, then the handler will be uninstalled.
+
+ @retval EFI_SUCCESS The handler for the processor interrupt was successfully installed or uninstalled.
+ @retval EFI_ALREADY_STARTED InterruptHandler is not NULL, and a handler for InterruptType was previously installed.
+ @retval EFI_INVALID_PARAMETER InterruptHandler is NULL, and a handler for InterruptType was not previously installed.
+ @retval EFI_UNSUPPORTED The interrupt specified by InterruptType is not supported.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmRegisterExceptionHandler (
+ IN EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+ IN EFI_EXCEPTION_TYPE ExceptionType,
+ IN EFI_CPU_INTERRUPT_HANDLER InterruptHandler
+ );
+
+//
+// Internal function prototypes
+//
+
+/**
+ Update the SMM CPU list per the pending operation.
+
+ This function is called after return from SMI handlers.
+**/
+VOID
+SmmCpuUpdate (
+ VOID
+ );
+
+/**
+ Initialize SMM CPU Services.
+
+ It installs EFI SMM CPU Services Protocol.
+
+  @param Handle The firmware allocated handle for the EFI image.
+
+ @retval EFI_SUCCESS EFI SMM CPU Services Protocol was installed successfully.
+**/
+EFI_STATUS
+InitializeSmmCpuServices (
+ IN EFI_HANDLE Handle
+ );
+
+#endif
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Cet.nasm b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Cet.nasm
new file mode 100644
index 00000000..9862fd78
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Cet.nasm
@@ -0,0 +1,33 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2019, Intel Corporation. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+;-------------------------------------------------------------------------------
+
+%include "Nasm.inc"
+
+SECTION .text
+
+global ASM_PFX(DisableCet)
+ASM_PFX(DisableCet):
+
+ ; Skip the pushed data for call
+ mov eax, 1
+ INCSSP_EAX
+
+ mov eax, cr4
+ btr eax, 23 ; clear CET
+ mov cr4, eax
+ ret
+
+global ASM_PFX(EnableCet)
+ASM_PFX(EnableCet):
+
+ mov eax, cr4
+ bts eax, 23 ; set CET
+ mov cr4, eax
+
+ ; use jmp to skip the check for ret
+ pop eax
+ jmp eax
+
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/MpFuncs.nasm b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/MpFuncs.nasm
new file mode 100644
index 00000000..7444505f
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/MpFuncs.nasm
@@ -0,0 +1,153 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2016, Intel Corporation. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+; Module Name:
+;
+; MpFuncs.nasm
+;
+; Abstract:
+;
+; This is the assembly code for Multi-processor S3 support
+;
+;-------------------------------------------------------------------------------
+
+SECTION .text
+
+extern ASM_PFX(InitializeFloatingPointUnits)
+
+%define VacantFlag 0x0
+%define NotVacantFlag 0xff
+
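+; These offsets into the exchange area must mirror the layout of the packed
+; 32-bit MP_CPU_EXCHANGE_INFO structure in CpuS3.c (UINTN = 4 bytes,
+; IA32_DESCRIPTOR = 6 bytes).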
+%define LockLocation RendezvousFunnelProcEnd - RendezvousFunnelProcStart
+%define StackStart LockLocation + 0x4
+%define StackSize LockLocation + 0x8
+%define RendezvousProc LockLocation + 0xC
+%define GdtrProfile LockLocation + 0x10
+%define IdtrProfile LockLocation + 0x16
+%define BufferStart LockLocation + 0x1C
+
+;-------------------------------------------------------------------------------------
+;RendezvousFunnelProc procedure follows. All APs execute their procedure. This
+;procedure serializes all the AP processors through an Init sequence. It must be
+;noted that APs arrive here very raw, i.e. in real mode with no stack.
+;NOTE THAT THIS PROCEDURE STARTS EXECUTING IN 16-BIT MODE.
+;-------------------------------------------------------------------------------------
+;RendezvousFunnelProc (&WakeUpBuffer,MemAddress);
+
+BITS 16
+global ASM_PFX(RendezvousFunnelProc)
+ASM_PFX(RendezvousFunnelProc):
+RendezvousFunnelProcStart:
+
+; At this point CS = 0x(vv00) and IP = 0x0.
+
+ mov ax, cs
+ mov ds, ax
+ mov es, ax
+ mov ss, ax
+ xor ax, ax
+ mov fs, ax
+ mov gs, ax
+
+flat32Start:
+
+ mov si, BufferStart
+  mov edx,dword [si]        ; EDX keeps the start address of the wakeup buffer
+
+ mov si, GdtrProfile
+o32 lgdt [cs:si]
+
+ mov si, IdtrProfile
+o32 lidt [cs:si]
+
+ xor ax, ax
+ mov ds, ax
+
+ mov eax, cr0 ; Get control register 0
+  or eax, 0x00000001 ; Set PE bit (bit #0)
+ mov cr0, eax
+
+FLAT32_JUMP:
+
+a32 jmp dword 0x20:0x0
+
+BITS 32
+PMODE_ENTRY: ; protected mode entry point
+
+ mov ax, 0x8
+o16 mov ds, ax
+o16 mov es, ax
+o16 mov fs, ax
+o16 mov gs, ax
+o16 mov ss, ax ; Flat mode setup.
+
+ mov esi, edx
+
+ mov edi, esi
+ add edi, LockLocation
+ mov al, NotVacantFlag
+TestLock:
+ xchg byte [edi], al
+ cmp al, NotVacantFlag
+ jz TestLock
+
+ProgramStack:
+
+ mov edi, esi
+ add edi, StackSize
+ mov eax, dword [edi]
+ mov edi, esi
+ add edi, StackStart
+ add eax, dword [edi]
+ mov esp, eax
+ mov dword [edi], eax
+
+Releaselock:
+
+ mov al, VacantFlag
+ mov edi, esi
+ add edi, LockLocation
+ xchg byte [edi], al
+
+ ;
+ ; Call assembly function to initialize FPU.
+ ;
+ mov ebx, ASM_PFX(InitializeFloatingPointUnits)
+ call ebx
+ ;
+ ; Call C Function
+ ;
+ mov edi, esi
+ add edi, RendezvousProc
+ mov eax, dword [edi]
+
+ test eax, eax
+ jz GoToSleep
+ call eax ; Call C function
+
+GoToSleep:
+ cli
+ hlt
+ jmp $-2
+
+RendezvousFunnelProcEnd:
+;-------------------------------------------------------------------------------------
+; AsmGetAddressMap (&AddressMap);
+;-------------------------------------------------------------------------------------
+global ASM_PFX(AsmGetAddressMap)
+ASM_PFX(AsmGetAddressMap):
+
+ pushad
+ mov ebp,esp
+
+ mov ebx, dword [ebp+0x24]
+ mov dword [ebx], RendezvousFunnelProcStart
+ mov dword [ebx+0x4], PMODE_ENTRY - RendezvousFunnelProcStart
+ mov dword [ebx+0x8], FLAT32_JUMP - RendezvousFunnelProcStart
+ mov dword [ebx+0xc], RendezvousFunnelProcEnd - RendezvousFunnelProcStart
+
+ popad
+ ret
+
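+;-------------------------------------------------------------------------------------
+; Editorial note (not part of the upstream file): the four stores above imply
+; a caller-side address map shaped like the sketch below; the authoritative
+; typedef lives in this module's C headers, which are not shown here:
+;
+;   typedef struct {
+;     UINT8  *RendezvousFunnelAddress;   // +0x0: absolute start address
+;     UINTN   PModeEntryOffset;          // +0x4: offset of PMODE_ENTRY
+;     UINTN   FlatJumpOffset;            // +0x8: offset of FLAT32_JUMP
+;     UINTN   Size;                      // +0xC: size of the whole stub
+;   } MP_ASSEMBLY_ADDRESS_MAP;           // (UINTN is 4 bytes on IA32)
+;-------------------------------------------------------------------------------------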
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
new file mode 100644
index 00000000..628d8c37
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
@@ -0,0 +1,374 @@
+/** @file
+Page table manipulation functions for IA-32 processors
+
+Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
+
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+/**
+ Disable CET.
+**/
+VOID
+EFIAPI
+DisableCet (
+ VOID
+ );
+
+/**
+ Enable CET.
+**/
+VOID
+EFIAPI
+EnableCet (
+ VOID
+ );
+
+/**
+ Get page table base address and the depth of the page table.
+
+ @param[out] Base Page table base address.
+ @param[out] FiveLevels TRUE means 5 level paging. FALSE means 4 level paging.
+**/
+VOID
+GetPageTable (
+ OUT UINTN *Base,
+ OUT BOOLEAN *FiveLevels OPTIONAL
+ )
+{
+ *Base = ((mInternalCr3 == 0) ?
+ (AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64) :
+ mInternalCr3);
+ if (FiveLevels != NULL) {
+ *FiveLevels = FALSE;
+ }
+}
+
+/**
+ Create PageTable for SMM use.
+
+ @return PageTable Address
+
+**/
+UINT32
+SmmInitPageTable (
+ VOID
+ )
+{
+ UINTN PageFaultHandlerHookAddress;
+ IA32_IDT_GATE_DESCRIPTOR *IdtEntry;
+ EFI_STATUS Status;
+
+ //
+ // Initialize spin lock
+ //
+ InitializeSpinLock (mPFLock);
+
+ mPhysicalAddressBits = 32;
+
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
+ HEAP_GUARD_NONSTOP_MODE ||
+ NULL_DETECTION_NONSTOP_MODE) {
+ //
+ // Set own Page Fault entry instead of the default one, because SMM Profile
+ // feature depends on IRET instruction to do Single Step
+ //
+ PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
+ IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
+ IdtEntry += EXCEPT_IA32_PAGE_FAULT;
+ IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;
+ IdtEntry->Bits.Reserved_0 = 0;
+ IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;
+ IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);
+ } else {
+ //
+ // Register SMM Page Fault Handler
+ //
+ Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
+ ASSERT_EFI_ERROR (Status);
+ }
+
+ //
+ // Additional SMM IDT initialization for SMM stack guard
+ //
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ InitializeIDTSmmStackGuard ();
+ }
+ return Gen4GPageTable (TRUE);
+}
+
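+//
+// Editorial sketch (not upstream code; SetIdtGate32 is a hypothetical helper):
+// the IDT patching in SmmInitPageTable () below splits the 32-bit handler
+// address across the two 16-bit offset fields of an IA32 interrupt gate:
+//
+//   VOID
+//   SetIdtGate32 (
+//     IN OUT IA32_IDT_GATE_DESCRIPTOR  *IdtEntry,
+//     IN     UINTN                     Handler
+//     )
+//   {
+//     IdtEntry->Bits.OffsetLow  = (UINT16)Handler;          // bits  0..15
+//     IdtEntry->Bits.GateType   = IA32_IDT_GATE_TYPE_INTERRUPT_32;
+//     IdtEntry->Bits.OffsetHigh = (UINT16)(Handler >> 16);  // bits 16..31
+//   }
+//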
+/**
+ Page Fault handler for SMM use.
+
+**/
+VOID
+SmiDefaultPFHandler (
+ VOID
+ )
+{
+ CpuDeadLoop ();
+}
+
+/**
+  The Page Fault handler wrapper for SMM use.
+
+  @param InterruptType    Defines the type of interrupt or exception that
+                          occurred on the processor. This parameter is processor architecture specific.
+ @param SystemContext A pointer to the processor context when
+ the interrupt occurred on the processor.
+**/
+VOID
+EFIAPI
+SmiPFHandler (
+ IN EFI_EXCEPTION_TYPE InterruptType,
+ IN EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ UINTN PFAddress;
+ UINTN GuardPageAddress;
+ UINTN CpuIndex;
+
+ ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);
+
+ AcquireSpinLock (mPFLock);
+
+ PFAddress = AsmReadCr2 ();
+
+ //
+  // If a page fault occurs in the SMRAM range, it might be in an SMM stack guard page,
+  // or an SMM page protection violation.
+ //
+ if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
+ (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
+ DumpCpuContext (InterruptType, SystemContext);
+ CpuIndex = GetCpuIndex ();
+ GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
+ if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
+ (PFAddress >= GuardPageAddress) &&
+ (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
+ DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
+ } else {
+ if ((SystemContext.SystemContextIa32->ExceptionData & IA32_PF_EC_ID) != 0) {
+ DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%x)\n", PFAddress));
+ DEBUG_CODE (
+ DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextIa32->Esp);
+ );
+ } else {
+ DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%x)\n", PFAddress));
+ DEBUG_CODE (
+ DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextIa32->Eip);
+ );
+ }
+
+ if (HEAP_GUARD_NONSTOP_MODE) {
+ GuardPagePFHandler (SystemContext.SystemContextIa32->ExceptionData);
+ goto Exit;
+ }
+ }
+ CpuDeadLoop ();
+ goto Exit;
+ }
+
+ //
+ // If a page fault occurs in non-SMRAM range.
+ //
+ if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
+ (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
+ if ((SystemContext.SystemContextIa32->ExceptionData & IA32_PF_EC_ID) != 0) {
+ DumpCpuContext (InterruptType, SystemContext);
+ DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%x) out of SMM range after SMM is locked!\n", PFAddress));
+ DEBUG_CODE (
+ DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextIa32->Esp);
+ );
+ CpuDeadLoop ();
+ goto Exit;
+ }
+
+ //
+ // If NULL pointer was just accessed
+ //
+ if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
+ (PFAddress < EFI_PAGE_SIZE)) {
+ DumpCpuContext (InterruptType, SystemContext);
+ DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
+ DEBUG_CODE (
+ DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextIa32->Eip);
+ );
+
+ if (NULL_DETECTION_NONSTOP_MODE) {
+ GuardPagePFHandler (SystemContext.SystemContextIa32->ExceptionData);
+ goto Exit;
+ }
+
+ CpuDeadLoop ();
+ goto Exit;
+ }
+
+ if (IsSmmCommBufferForbiddenAddress (PFAddress)) {
+ DumpCpuContext (InterruptType, SystemContext);
+ DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%x)!\n", PFAddress));
+ DEBUG_CODE (
+ DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextIa32->Eip);
+ );
+ CpuDeadLoop ();
+ goto Exit;
+ }
+ }
+
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ SmmProfilePFHandler (
+ SystemContext.SystemContextIa32->Eip,
+ SystemContext.SystemContextIa32->ExceptionData
+ );
+ } else {
+ DumpCpuContext (InterruptType, SystemContext);
+ SmiDefaultPFHandler ();
+ }
+
+Exit:
+ ReleaseSpinLock (mPFLock);
+}
+
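+//
+// Editorial note (not upstream code): the ExceptionData tests in SmiPFHandler
+// rely on the architectural page-fault error code layout (IA-32 SDM):
+//
+//   bit 0  P    - 0: non-present page, 1: protection violation
+//   bit 1  W/R  - 1: fault was a write
+//   bit 2  U/S  - 1: fault came from user mode
+//   bit 3  RSVD - 1: reserved-bit violation
+//   bit 4  I/D  - 1: fault was an instruction fetch (IA32_PF_EC_ID)
+//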
+/**
+ This function sets memory attribute for page table.
+**/
+VOID
+SetPageTableAttributes (
+ VOID
+ )
+{
+ UINTN Index2;
+ UINTN Index3;
+ UINT64 *L1PageTable;
+ UINT64 *L2PageTable;
+ UINT64 *L3PageTable;
+ UINTN PageTableBase;
+ BOOLEAN IsSplitted;
+ BOOLEAN PageTableSplitted;
+ BOOLEAN CetEnabled;
+
+ //
+ // Don't mark page table to read-only if heap guard is enabled.
+ //
+ // BIT2: SMM page guard enabled
+ // BIT3: SMM pool guard enabled
+ //
+ if ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) {
+ DEBUG ((DEBUG_INFO, "Don't mark page table to read-only as heap guard is enabled\n"));
+ return ;
+ }
+
+ //
+ // Don't mark page table to read-only if SMM profile is enabled.
+ //
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ DEBUG ((DEBUG_INFO, "Don't mark page table to read-only as SMM profile is enabled\n"));
+ return ;
+ }
+
+ DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));
+
+ //
+  // Disable write protection, because we need to mark the page table as read-only.
+  // We need to *write* page table memory in order to mark it *read only*.
+ //
+ CetEnabled = ((AsmReadCr4() & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
+ if (CetEnabled) {
+ //
+ // CET must be disabled if WP is disabled.
+ //
+ DisableCet();
+ }
+ AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);
+
+ do {
+ DEBUG ((DEBUG_INFO, "Start...\n"));
+ PageTableSplitted = FALSE;
+
+ GetPageTable (&PageTableBase, NULL);
+ L3PageTable = (UINT64 *)PageTableBase;
+
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)PageTableBase, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+
+ for (Index3 = 0; Index3 < 4; Index3++) {
+ L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
+ if (L2PageTable == NULL) {
+ continue;
+ }
+
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+
+ for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
+ if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
+ // 2M
+ continue;
+ }
+ L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
+ if (L1PageTable == NULL) {
+ continue;
+ }
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+ }
+ }
+ } while (PageTableSplitted);
+
+ //
+ // Enable write protection, after page table updated.
+ //
+ AsmWriteCr0 (AsmReadCr0() | CR0_WP);
+ if (CetEnabled) {
+ //
+ // re-enable CET.
+ //
+ EnableCet();
+ }
+
+ return ;
+}
+
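+//
+// Editorial note (not upstream code): on IA32 the loop above walks the
+// 3-level PAE hierarchy -- the 4-entry PDPT at the page table base
+// (Index3 < 4), the page directories below it, and the 4KB page tables
+// below those; entries with IA32_PG_PS set map 2MB pages and have no L1
+// table. Every table page is marked EFI_MEMORY_RO, and the do/while repeats
+// until a full pass completes without splitting, because splitting a page
+// allocates new, still-writable table pages that must be protected on the
+// next pass.
+//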
+/**
+  This function returns with no action for 32-bit.
+
+ @param[out] *Cr2 Pointer to variable to hold CR2 register value.
+**/
+VOID
+SaveCr2 (
+ OUT UINTN *Cr2
+ )
+{
+ return ;
+}
+
+/**
+  This function returns with no action for 32-bit.
+
+ @param[in] Cr2 Value to write into CR2 register.
+**/
+VOID
+RestoreCr2 (
+ IN UINTN Cr2
+ )
+{
+ return ;
+}
+
+/**
+ Return whether access to non-SMRAM is restricted.
+
+ @retval TRUE Access to non-SMRAM is restricted.
+ @retval FALSE Access to non-SMRAM is not restricted.
+**/
+BOOLEAN
+IsRestrictedMemoryAccess (
+ VOID
+ )
+{
+ return TRUE;
+}
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Semaphore.c b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Semaphore.c
new file mode 100644
index 00000000..020dc782
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Semaphore.c
@@ -0,0 +1,42 @@
+/** @file
+Semaphore mechanism to indicate to the BSP that an AP has exited SMM
+after SMBASE relocation.
+
+Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+UINTN mSmmRelocationOriginalAddress;
+volatile BOOLEAN *mRebasedFlag;
+
+/**
+ Hook return address of SMM Save State so that semaphore code
+ can be executed immediately after AP exits SMM to indicate to
+ the BSP that an AP has exited SMM after SMBASE relocation.
+
+ @param[in] CpuIndex The processor index.
+ @param[in] RebasedFlag A pointer to a flag that is set to TRUE
+ immediately after AP exits SMM.
+
+**/
+VOID
+SemaphoreHook (
+ IN UINTN CpuIndex,
+ IN volatile BOOLEAN *RebasedFlag
+ )
+{
+ SMRAM_SAVE_STATE_MAP *CpuState;
+
+ mRebasedFlag = RebasedFlag;
+
+ CpuState = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);
+ mSmmRelocationOriginalAddress = (UINTN)HookReturnFromSmm (
+ CpuIndex,
+ CpuState,
+ (UINT64)(UINTN)&SmmRelocationSemaphoreComplete,
+ (UINT64)(UINTN)&SmmRelocationSemaphoreComplete
+ );
+}
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.nasm b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.nasm
new file mode 100644
index 00000000..84057170
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.nasm
@@ -0,0 +1,314 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2016 - 2019, Intel Corporation. All rights reserved.<BR>
+; Copyright (c) 2020, AMD Incorporated. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+; Module Name:
+;
+; SmiEntry.nasm
+;
+; Abstract:
+;
+; Code template of the SMI handler for a particular processor
+;
+;-------------------------------------------------------------------------------
+
+%include "StuffRsbNasm.inc"
+%include "Nasm.inc"
+
+%define MSR_IA32_S_CET 0x6A2
+%define MSR_IA32_CET_SH_STK_EN 0x1
+%define MSR_IA32_CET_WR_SHSTK_EN 0x2
+%define MSR_IA32_CET_ENDBR_EN 0x4
+%define MSR_IA32_CET_LEG_IW_EN 0x8
+%define MSR_IA32_CET_NO_TRACK_EN 0x10
+%define MSR_IA32_CET_SUPPRESS_DIS 0x20
+%define MSR_IA32_CET_SUPPRESS 0x400
+%define MSR_IA32_CET_TRACKER 0x800
+%define MSR_IA32_PL0_SSP 0x6A4
+
+%define CR4_CET 0x800000
+
+%define MSR_IA32_MISC_ENABLE 0x1A0
+%define MSR_EFER 0xc0000080
+%define MSR_EFER_XD 0x800
+
+;
+; Constants relating to PROCESSOR_SMM_DESCRIPTOR
+;
+%define DSC_OFFSET 0xfb00
+%define DSC_GDTPTR 0x30
+%define DSC_GDTSIZ 0x38
+%define DSC_CS 14
+%define DSC_DS 16
+%define DSC_SS 18
+%define DSC_OTHERSEG 20
+
+%define PROTECT_MODE_CS 0x8
+%define PROTECT_MODE_DS 0x20
+%define TSS_SEGMENT 0x40
+
+extern ASM_PFX(SmiRendezvous)
+extern ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
+extern ASM_PFX(CpuSmmDebugEntry)
+extern ASM_PFX(CpuSmmDebugExit)
+
+global ASM_PFX(gcSmiHandlerTemplate)
+global ASM_PFX(gcSmiHandlerSize)
+global ASM_PFX(gPatchSmiCr3)
+global ASM_PFX(gPatchSmiStack)
+global ASM_PFX(gPatchSmbase)
+extern ASM_PFX(mXdSupported)
+global ASM_PFX(gPatchXdSupported)
+global ASM_PFX(gPatchMsrIa32MiscEnableSupported)
+extern ASM_PFX(gSmiHandlerIdtr)
+
+extern ASM_PFX(mCetSupported)
+global ASM_PFX(mPatchCetSupported)
+global ASM_PFX(mPatchCetPl0Ssp)
+global ASM_PFX(mPatchCetInterruptSsp)
+
+ SECTION .text
+
+BITS 16
+ASM_PFX(gcSmiHandlerTemplate):
+_SmiEntryPoint:
+ mov bx, _GdtDesc - _SmiEntryPoint + 0x8000
+ mov ax,[cs:DSC_OFFSET + DSC_GDTSIZ]
+ dec ax
+ mov [cs:bx], ax
+ mov eax, [cs:DSC_OFFSET + DSC_GDTPTR]
+ mov [cs:bx + 2], eax
+ mov ebp, eax ; ebp = GDT base
+o32 lgdt [cs:bx] ; lgdt fword ptr cs:[bx]
+ mov ax, PROTECT_MODE_CS
+ mov [cs:bx-0x2],ax
+ mov edi, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmbase):
+ lea eax, [edi + (@32bit - _SmiEntryPoint) + 0x8000]
+ mov [cs:bx-0x6],eax
+ mov ebx, cr0
+ and ebx, 0x9ffafff3
+ or ebx, 0x23
+ mov cr0, ebx
+ jmp dword 0x0:0x0
+_GdtDesc:
+ DW 0
+ DD 0
+
+BITS 32
+@32bit:
+ mov ax, PROTECT_MODE_DS
+o16 mov ds, ax
+o16 mov es, ax
+o16 mov fs, ax
+o16 mov gs, ax
+o16 mov ss, ax
+ mov esp, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmiStack):
+ mov eax, ASM_PFX(gSmiHandlerIdtr)
+ lidt [eax]
+ jmp ProtFlatMode
+
+ProtFlatMode:
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmiCr3):
+ mov cr3, eax
+;
+; Need to test for CR4 specific bit support
+;
+ mov eax, 1
+ cpuid ; use CPUID to determine if specific CR4 bits are supported
+ xor eax, eax ; Clear EAX
+ test edx, BIT2 ; Check for DE capabilities
+ jz .0
+ or eax, BIT3
+.0:
+ test edx, BIT6 ; Check for PAE capabilities
+ jz .1
+ or eax, BIT5
+.1:
+ test edx, BIT7 ; Check for MCE capabilities
+ jz .2
+ or eax, BIT6
+.2:
+ test edx, BIT24 ; Check for FXSR capabilities
+ jz .3
+ or eax, BIT9
+.3:
+ test edx, BIT25 ; Check for SSE capabilities
+ jz .4
+ or eax, BIT10
+.4: ; as cr4.PGE is not set here, refresh cr3
+ mov cr4, eax ; in PreModifyMtrrs() to flush TLB.
+
+ cmp byte [dword ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))], 0
+ jz .6
+; Load TSS
+ mov byte [ebp + TSS_SEGMENT + 5], 0x89 ; clear busy flag
+ mov eax, TSS_SEGMENT
+ ltr ax
+.6:
+
+; enable NXE if supported
+ mov al, strict byte 1 ; source operand may be patched
+ASM_PFX(gPatchXdSupported):
+ cmp al, 0
+ jz @SkipXd
+
+; If MSR_IA32_MISC_ENABLE is supported, clear XD Disable bit
+ mov al, strict byte 1 ; source operand may be patched
+ASM_PFX(gPatchMsrIa32MiscEnableSupported):
+ cmp al, 1
+ jz MsrIa32MiscEnableSupported
+
+; MSR_IA32_MISC_ENABLE not supported
+ xor edx, edx
+ push edx ; don't try to restore the XD Disable bit just before RSM
+ jmp EnableNxe
+
+;
+; Check XD disable bit
+;
+MsrIa32MiscEnableSupported:
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ push edx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz EnableNxe
+ and dx, 0xFFFB ; clear XD Disable bit if it is set
+ wrmsr
+EnableNxe:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+ jmp @XdDone
+@SkipXd:
+ sub esp, 4
+@XdDone:
+
+ mov ebx, cr0
+ or ebx, 0x80010023 ; enable paging + WP + NE + MP + PE
+ mov cr0, ebx
+ lea ebx, [edi + DSC_OFFSET]
+ mov ax, [ebx + DSC_DS]
+ mov ds, eax
+ mov ax, [ebx + DSC_OTHERSEG]
+ mov es, eax
+ mov fs, eax
+ mov gs, eax
+ mov ax, [ebx + DSC_SS]
+ mov ss, eax
+
+ mov ebx, [esp + 4] ; ebx <- CpuIndex
+
+; enable CET if supported
+ mov al, strict byte 1 ; source operand may be patched
+ASM_PFX(mPatchCetSupported):
+ cmp al, 0
+ jz CetDone
+
+ mov ecx, MSR_IA32_S_CET
+ rdmsr
+ push edx
+ push eax
+
+ mov ecx, MSR_IA32_PL0_SSP
+ rdmsr
+ push edx
+ push eax
+
+ mov ecx, MSR_IA32_S_CET
+ mov eax, MSR_IA32_CET_SH_STK_EN
+ xor edx, edx
+ wrmsr
+
+ mov ecx, MSR_IA32_PL0_SSP
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(mPatchCetPl0Ssp):
+ xor edx, edx
+ wrmsr
+ mov ecx, cr0
+ btr ecx, 16 ; clear WP
+ mov cr0, ecx
+    mov     [eax], eax                  ; reload SSP, and clear the busy flag.
+ xor ecx, ecx
+ mov [eax + 4], ecx
+
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(mPatchCetInterruptSsp):
+ cmp eax, 0
+ jz CetInterruptDone
+    mov     [eax], eax                  ; reload SSP, and clear the busy flag.
+ xor ecx, ecx
+ mov [eax + 4], ecx
+CetInterruptDone:
+
+ mov ecx, cr0
+ bts ecx, 16 ; set WP
+ mov cr0, ecx
+
+ mov eax, 0x668 | CR4_CET
+ mov cr4, eax
+
+ SETSSBSY
+
+CetDone:
+
+ push ebx
+ mov eax, ASM_PFX(CpuSmmDebugEntry)
+ call eax
+ add esp, 4
+
+ push ebx
+ mov eax, ASM_PFX(SmiRendezvous)
+ call eax
+ add esp, 4
+
+ push ebx
+ mov eax, ASM_PFX(CpuSmmDebugExit)
+ call eax
+ add esp, 4
+
+ mov eax, ASM_PFX(mCetSupported)
+ mov al, [eax]
+ cmp al, 0
+ jz CetDone2
+
+ mov eax, 0x668
+ mov cr4, eax ; disable CET
+
+ mov ecx, MSR_IA32_PL0_SSP
+ pop eax
+ pop edx
+ wrmsr
+
+ mov ecx, MSR_IA32_S_CET
+ pop eax
+ pop edx
+ wrmsr
+CetDone2:
+
+ mov eax, ASM_PFX(mXdSupported)
+ mov al, [eax]
+ cmp al, 0
+ jz .7
+ pop edx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz .7
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
+ wrmsr
+
+.7:
+
+ StuffRsb32
+ rsm
+
+ASM_PFX(gcSmiHandlerSize): DW $ - _SmiEntryPoint
+
+global ASM_PFX(PiSmmCpuSmiEntryFixupAddress)
+ASM_PFX(PiSmmCpuSmiEntryFixupAddress):
+ ret
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.nasm b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.nasm
new file mode 100644
index 00000000..bcd5a007
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.nasm
@@ -0,0 +1,705 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+; Module Name:
+;
+; SmiException.nasm
+;
+; Abstract:
+;
+; Exception handlers used in SM mode
+;
+;-------------------------------------------------------------------------------
+
+extern ASM_PFX(FeaturePcdGet (PcdCpuSmmProfileEnable))
+extern ASM_PFX(SmiPFHandler)
+extern ASM_PFX(mSetupDebugTrap)
+
+global ASM_PFX(gcSmiIdtr)
+global ASM_PFX(gcSmiGdtr)
+global ASM_PFX(gTaskGateDescriptor)
+global ASM_PFX(gcPsd)
+
+ SECTION .data
+
+NullSeg: DQ 0 ; reserved by architecture
+CodeSeg32:
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x9b
+ DB 0xcf ; LimitHigh
+ DB 0 ; BaseHigh
+ProtModeCodeSeg32:
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x9b
+ DB 0xcf ; LimitHigh
+ DB 0 ; BaseHigh
+ProtModeSsSeg32:
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x93
+ DB 0xcf ; LimitHigh
+ DB 0 ; BaseHigh
+DataSeg32:
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x93
+ DB 0xcf ; LimitHigh
+ DB 0 ; BaseHigh
+CodeSeg16:
+ DW -1
+ DW 0
+ DB 0
+ DB 0x9b
+ DB 0x8f
+ DB 0
+DataSeg16:
+ DW -1
+ DW 0
+ DB 0
+ DB 0x93
+ DB 0x8f
+ DB 0
+CodeSeg64:
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x9b
+ DB 0xaf ; LimitHigh
+ DB 0 ; BaseHigh
+GDT_SIZE equ $ - NullSeg
+
+TssSeg:
+ DW TSS_DESC_SIZE ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x89
+ DB 0x80 ; LimitHigh
+ DB 0 ; BaseHigh
+ExceptionTssSeg:
+ DW EXCEPTION_TSS_DESC_SIZE ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x89
+ DB 0x80 ; LimitHigh
+ DB 0 ; BaseHigh
+
+CODE_SEL equ CodeSeg32 - NullSeg
+DATA_SEL equ DataSeg32 - NullSeg
+TSS_SEL equ TssSeg - NullSeg
+EXCEPTION_TSS_SEL equ ExceptionTssSeg - NullSeg
+
+struc IA32_TSS
+ resw 1
+ resw 1
+ .ESP0: resd 1
+ .SS0: resw 1
+ resw 1
+ .ESP1: resd 1
+ .SS1: resw 1
+ resw 1
+ .ESP2: resd 1
+ .SS2: resw 1
+ resw 1
+ ._CR3: resd 1
+ .EIP: resd 1
+ .EFLAGS: resd 1
+ ._EAX: resd 1
+ ._ECX: resd 1
+ ._EDX: resd 1
+ ._EBX: resd 1
+ ._ESP: resd 1
+ ._EBP: resd 1
+ ._ESI: resd 1
+ ._EDI: resd 1
+ ._ES: resw 1
+ resw 1
+ ._CS: resw 1
+ resw 1
+ ._SS: resw 1
+ resw 1
+ ._DS: resw 1
+ resw 1
+ ._FS: resw 1
+ resw 1
+ ._GS: resw 1
+ resw 1
+ .LDT: resw 1
+ resw 1
+ resw 1
+ resw 1
+endstruc
+
+; Create 2 TSS segments just after GDT
+TssDescriptor:
+ DW 0 ; PreviousTaskLink
+ DW 0 ; Reserved
+ DD 0 ; ESP0
+ DW 0 ; SS0
+ DW 0 ; Reserved
+ DD 0 ; ESP1
+ DW 0 ; SS1
+ DW 0 ; Reserved
+ DD 0 ; ESP2
+ DW 0 ; SS2
+ DW 0 ; Reserved
+ DD 0 ; CR3
+ DD 0 ; EIP
+ DD 0 ; EFLAGS
+ DD 0 ; EAX
+ DD 0 ; ECX
+ DD 0 ; EDX
+ DD 0 ; EBX
+ DD 0 ; ESP
+ DD 0 ; EBP
+ DD 0 ; ESI
+ DD 0 ; EDI
+ DW 0 ; ES
+ DW 0 ; Reserved
+ DW 0 ; CS
+ DW 0 ; Reserved
+ DW 0 ; SS
+ DW 0 ; Reserved
+ DW 0 ; DS
+ DW 0 ; Reserved
+ DW 0 ; FS
+ DW 0 ; Reserved
+ DW 0 ; GS
+ DW 0 ; Reserved
+ DW 0 ; LDT Selector
+ DW 0 ; Reserved
+ DW 0 ; T
+ DW 0 ; I/O Map Base
+TSS_DESC_SIZE equ $ - TssDescriptor
+
+ExceptionTssDescriptor:
+ DW 0 ; PreviousTaskLink
+ DW 0 ; Reserved
+ DD 0 ; ESP0
+ DW 0 ; SS0
+ DW 0 ; Reserved
+ DD 0 ; ESP1
+ DW 0 ; SS1
+ DW 0 ; Reserved
+ DD 0 ; ESP2
+ DW 0 ; SS2
+ DW 0 ; Reserved
+ DD 0 ; CR3
+ DD PFHandlerEntry ; EIP
+ DD 00000002 ; EFLAGS
+ DD 0 ; EAX
+ DD 0 ; ECX
+ DD 0 ; EDX
+ DD 0 ; EBX
+ DD 0 ; ESP
+ DD 0 ; EBP
+ DD 0 ; ESI
+ DD 0 ; EDI
+ DW DATA_SEL ; ES
+ DW 0 ; Reserved
+ DW CODE_SEL ; CS
+ DW 0 ; Reserved
+ DW DATA_SEL ; SS
+ DW 0 ; Reserved
+ DW DATA_SEL ; DS
+ DW 0 ; Reserved
+ DW DATA_SEL ; FS
+ DW 0 ; Reserved
+ DW DATA_SEL ; GS
+ DW 0 ; Reserved
+ DW 0 ; LDT Selector
+ DW 0 ; Reserved
+ DW 0 ; T
+ DW 0 ; I/O Map Base
+ DD 0 ; SSP
+EXCEPTION_TSS_DESC_SIZE equ $ - ExceptionTssDescriptor
+
+ASM_PFX(gcPsd):
+ DB 'PSDSIG '
+ DW PSD_SIZE
+ DW 2
+ DW 1 << 2
+ DW CODE_SEL
+ DW DATA_SEL
+ DW DATA_SEL
+ DW DATA_SEL
+ DW 0
+ DQ 0
+ DQ 0
+ DQ 0
+ DD 0
+ DD NullSeg
+ DD GDT_SIZE
+ DD 0
+ times 24 DB 0
+ DD 0
+ DD 0
+PSD_SIZE equ $ - ASM_PFX(gcPsd)
+
+ASM_PFX(gcSmiGdtr):
+ DW GDT_SIZE - 1
+ DD NullSeg
+
+ASM_PFX(gcSmiIdtr):
+ DW 0
+ DD 0
+
+ASM_PFX(gTaskGateDescriptor):
+ DW 0 ; Reserved
+ DW EXCEPTION_TSS_SEL ; TSS Segment selector
+ DB 0 ; Reserved
+ DB 0x85 ; Task Gate, present, DPL = 0
+ DW 0 ; Reserved
+
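+; Editorial note (not part of the upstream file): when SMM Stack Guard is
+; enabled, InitializeIDTSmmStackGuard () installs gTaskGateDescriptor as the
+; page-fault IDT entry. A #PF then performs a hardware task switch into
+; ExceptionTssDescriptor, which begins execution at PFHandlerEntry on a
+; known-good stack, so a stack overflow in the interrupted context cannot
+; corrupt the handler's own stack.
+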
+ SECTION .text
+;------------------------------------------------------------------------------
+; PageFaultIdtHandlerSmmProfile is the entry point for page faults only
+;
+;
+; Stack:
+; +---------------------+
+; + EFlags +
+; +---------------------+
+; + CS +
+; +---------------------+
+; + EIP +
+; +---------------------+
+; + Error Code +
+; +---------------------+
+; + Vector Number +
+; +---------------------+
+; + EBP +
+; +---------------------+ <-- EBP
+;
+;
+;------------------------------------------------------------------------------
+global ASM_PFX(PageFaultIdtHandlerSmmProfile)
+ASM_PFX(PageFaultIdtHandlerSmmProfile):
+ push 0xe ; Page Fault
+
+ push ebp
+ mov ebp, esp
+
+ ;
+ ; Align stack to make sure that EFI_FX_SAVE_STATE_IA32 of EFI_SYSTEM_CONTEXT_IA32
+ ; is 16-byte aligned
+ ;
+ and esp, 0xfffffff0
+ sub esp, 12
+
+;; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ push eax
+ push ecx
+ push edx
+ push ebx
+ lea ecx, [ebp + 6 * 4]
+ push ecx ; ESP
+ push dword [ebp] ; EBP
+ push esi
+ push edi
+
+;; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+ mov eax, ss
+ push eax
+ movzx eax, word [ebp + 4 * 4]
+ push eax
+ mov eax, ds
+ push eax
+ mov eax, es
+ push eax
+ mov eax, fs
+ push eax
+ mov eax, gs
+ push eax
+
+;; UINT32 Eip;
+ mov eax, [ebp + 3 * 4]
+ push eax
+
+;; UINT32 Gdtr[2], Idtr[2];
+ sub esp, 8
+ sidt [esp]
+ mov eax, [esp + 2]
+ xchg eax, [esp]
+ and eax, 0xFFFF
+ mov [esp+4], eax
+
+ sub esp, 8
+ sgdt [esp]
+ mov eax, [esp + 2]
+ xchg eax, [esp]
+ and eax, 0xFFFF
+ mov [esp+4], eax
+
+;; UINT32 Ldtr, Tr;
+ xor eax, eax
+ str ax
+ push eax
+ sldt ax
+ push eax
+
+;; UINT32 EFlags;
+ mov eax, [ebp + 5 * 4]
+ push eax
+
+;; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ mov eax, cr4
+ or eax, 0x208
+ mov cr4, eax
+ push eax
+ mov eax, cr3
+ push eax
+ mov eax, cr2
+ push eax
+ xor eax, eax
+ push eax
+ mov eax, cr0
+ push eax
+
+;; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+ mov eax, dr7
+ push eax
+ mov eax, dr6
+ push eax
+ mov eax, dr3
+ push eax
+ mov eax, dr2
+ push eax
+ mov eax, dr1
+ push eax
+ mov eax, dr0
+ push eax
+
+;; FX_SAVE_STATE_IA32 FxSaveState;
+ sub esp, 512
+ mov edi, esp
+ fxsave [edi]
+
+; UEFI calling convention for IA32 requires that the Direction flag in EFLAGS is clear
+ cld
+
+;; UINT32 ExceptionData;
+ push dword [ebp + 2 * 4]
+
+;; call into exception handler
+
+;; Prepare parameter and call
+ mov edx, esp
+ push edx
+ mov edx, dword [ebp + 1 * 4]
+ push edx
+
+ ;
+ ; Call External Exception Handler
+ ;
+ mov eax, ASM_PFX(SmiPFHandler)
+ call eax
+ add esp, 8
+
+;; UINT32 ExceptionData;
+ add esp, 4
+
+;; FX_SAVE_STATE_IA32 FxSaveState;
+ mov esi, esp
+ fxrstor [esi]
+ add esp, 512
+
+;; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+;; Skip restoration of DRx registers to support debuggers
+;; that set breakpoint in interrupt/exception context
+ add esp, 4 * 6
+
+;; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ pop eax
+ mov cr0, eax
+ add esp, 4 ; not for Cr1
+ pop eax
+ mov cr2, eax
+ pop eax
+ mov cr3, eax
+ pop eax
+ mov cr4, eax
+
+;; UINT32 EFlags;
+ pop dword [ebp + 5 * 4]
+
+;; UINT32 Ldtr, Tr;
+;; UINT32 Gdtr[2], Idtr[2];
+;; Best not let anyone mess with these particular registers...
+ add esp, 24
+
+;; UINT32 Eip;
+ pop dword [ebp + 3 * 4]
+
+;; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+;; NOTE - modified segment registers could hang the debugger... We
+;; could attempt to insulate ourselves against this possibility,
+;; but that poses risks as well.
+;;
+ pop gs
+ pop fs
+ pop es
+ pop ds
+ pop dword [ebp + 4 * 4]
+ pop ss
+
+;; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ pop edi
+ pop esi
+ add esp, 4 ; not for ebp
+ add esp, 4 ; not for esp
+ pop ebx
+ pop edx
+ pop ecx
+ pop eax
+
+ mov esp, ebp
+ pop ebp
+
+; Enable TF bit after page fault handler runs
+ bts dword [esp + 16], 8 ; EFLAGS
+
+ add esp, 8 ; skip INT# & ErrCode
+Return:
+ iretd
+;
+; Page Fault Exception Handler entry when SMM Stack Guard is enabled
+; Execution starts here after a task switch
+;
+PFHandlerEntry:
+;
+; Get this processor's TSS
+;
+ sub esp, 8
+ sgdt [esp + 2]
+ mov eax, [esp + 4] ; GDT base
+ add esp, 8
+ mov ecx, [eax + TSS_SEL + 2]
+ shl ecx, 8
+ mov cl, [eax + TSS_SEL + 7]
+ ror ecx, 8 ; ecx = TSS base
+
+ mov ebp, esp
+
+ ;
+ ; Align stack to make sure that EFI_FX_SAVE_STATE_IA32 of EFI_SYSTEM_CONTEXT_IA32
+ ; is 16-byte aligned
+ ;
+ and esp, 0xfffffff0
+ sub esp, 12
+
+;; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ push dword [ecx + IA32_TSS._EAX]
+ push dword [ecx + IA32_TSS._ECX]
+ push dword [ecx + IA32_TSS._EDX]
+ push dword [ecx + IA32_TSS._EBX]
+ push dword [ecx + IA32_TSS._ESP]
+ push dword [ecx + IA32_TSS._EBP]
+ push dword [ecx + IA32_TSS._ESI]
+ push dword [ecx + IA32_TSS._EDI]
+
+;; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+ movzx eax, word [ecx + IA32_TSS._SS]
+ push eax
+ movzx eax, word [ecx + IA32_TSS._CS]
+ push eax
+ movzx eax, word [ecx + IA32_TSS._DS]
+ push eax
+ movzx eax, word [ecx + IA32_TSS._ES]
+ push eax
+ movzx eax, word [ecx + IA32_TSS._FS]
+ push eax
+ movzx eax, word [ecx + IA32_TSS._GS]
+ push eax
+
+;; UINT32 Eip;
+ push dword [ecx + IA32_TSS.EIP]
+
+;; UINT32 Gdtr[2], Idtr[2];
+ sub esp, 8
+ sidt [esp]
+ mov eax, [esp + 2]
+ xchg eax, [esp]
+ and eax, 0xFFFF
+ mov [esp+4], eax
+
+ sub esp, 8
+ sgdt [esp]
+ mov eax, [esp + 2]
+ xchg eax, [esp]
+ and eax, 0xFFFF
+ mov [esp+4], eax
+
+;; UINT32 Ldtr, Tr;
+ mov eax, TSS_SEL
+ push eax
+ movzx eax, word [ecx + IA32_TSS.LDT]
+ push eax
+
+;; UINT32 EFlags;
+ push dword [ecx + IA32_TSS.EFLAGS]
+
+;; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ mov eax, cr4
+ or eax, 0x208
+ mov cr4, eax
+ push eax
+ mov eax, cr3
+ push eax
+ mov eax, cr2
+ push eax
+ xor eax, eax
+ push eax
+ mov eax, cr0
+ push eax
+
+;; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+ mov eax, dr7
+ push eax
+ mov eax, dr6
+ push eax
+ mov eax, dr3
+ push eax
+ mov eax, dr2
+ push eax
+ mov eax, dr1
+ push eax
+ mov eax, dr0
+ push eax
+
+;; FX_SAVE_STATE_IA32 FxSaveState;
+;; Clear TS bit in CR0 to avoid Device Not Available Exception (#NM)
+;; when executing fxsave/fxrstor instruction
+ clts
+ sub esp, 512
+ mov edi, esp
+ fxsave [edi]
+
+; UEFI calling convention for IA32 requires that the Direction flag in EFLAGS is clear
+ cld
+
+;; UINT32 ExceptionData;
+ push dword [ebp]
+
+;; call into exception handler
+ mov ebx, ecx
+ mov eax, ASM_PFX(SmiPFHandler)
+
+;; Prepare parameter and call
+ mov edx, esp
+ push edx
+ mov edx, 14
+ push edx
+
+ ;
+ ; Call External Exception Handler
+ ;
+ call eax
+ add esp, 8
+
+ mov ecx, ebx
+;; UINT32 ExceptionData;
+ add esp, 4
+
+;; FX_SAVE_STATE_IA32 FxSaveState;
+ mov esi, esp
+ fxrstor [esi]
+ add esp, 512
+
+;; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+;; Skip restoration of DRx registers to support debuggers
+;; that set breakpoints in interrupt/exception context
+ add esp, 4 * 6
+
+;; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ pop eax
+ mov cr0, eax
+ add esp, 4 ; not for Cr1
+ pop eax
+ mov cr2, eax
+ pop eax
+ mov dword [ecx + IA32_TSS._CR3], eax
+ pop eax
+ mov cr4, eax
+
+;; UINT32 EFlags;
+ pop dword [ecx + IA32_TSS.EFLAGS]
+
+;; UINT32 Ldtr, Tr;
+;; UINT32 Gdtr[2], Idtr[2];
+;; Best not let anyone mess with these particular registers...
+ add esp, 24
+
+;; UINT32 Eip;
+ pop dword [ecx + IA32_TSS.EIP]
+
+;; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+;; NOTE - modified segment registers could hang the debugger... We
+;; could attempt to insulate ourselves against this possibility,
+;; but that poses risks as well.
+;;
+ pop eax
+o16 mov [ecx + IA32_TSS._GS], ax
+ pop eax
+o16 mov [ecx + IA32_TSS._FS], ax
+ pop eax
+o16 mov [ecx + IA32_TSS._ES], ax
+ pop eax
+o16 mov [ecx + IA32_TSS._DS], ax
+ pop eax
+o16 mov [ecx + IA32_TSS._CS], ax
+ pop eax
+o16 mov [ecx + IA32_TSS._SS], ax
+
+;; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ pop dword [ecx + IA32_TSS._EDI]
+ pop dword [ecx + IA32_TSS._ESI]
+ add esp, 4 ; not for ebp
+ add esp, 4 ; not for esp
+ pop dword [ecx + IA32_TSS._EBX]
+ pop dword [ecx + IA32_TSS._EDX]
+ pop dword [ecx + IA32_TSS._ECX]
+ pop dword [ecx + IA32_TSS._EAX]
+
+ mov esp, ebp
+
+; Set single step DB# if SMM profile is enabled and page fault exception happens
+ cmp byte [dword ASM_PFX(mSetupDebugTrap)], 0
+ jz @Done2
+
+; Create return context for iretd in stub function
+ mov eax, dword [ecx + IA32_TSS._ESP] ; Get old stack pointer
+ mov ebx, dword [ecx + IA32_TSS.EIP]
+ mov [eax - 0xc], ebx ; create EIP in old stack
+ movzx ebx, word [ecx + IA32_TSS._CS]
+ mov [eax - 0x8], ebx ; create CS in old stack
+ mov ebx, dword [ecx + IA32_TSS.EFLAGS]
+ bts ebx, 8
+ mov [eax - 0x4], ebx ; create eflags in old stack
+ mov eax, dword [ecx + IA32_TSS._ESP] ; Get old stack pointer
+    sub eax, 0xc                        ; minus 12 bytes
+ mov dword [ecx + IA32_TSS._ESP], eax ; Set new stack pointer
+; Replace the EIP of interrupted task with stub function
+ mov eax, ASM_PFX(PageFaultStubFunction)
+ mov dword [ecx + IA32_TSS.EIP], eax
+; Jump to the iretd so the next page fault, handled as a task, will start cleanly after iretd.
+@Done2:
+ add esp, 4 ; skip ErrCode
+
+ jmp Return
+
+global ASM_PFX(PageFaultStubFunction)
+ASM_PFX(PageFaultStubFunction):
+;
+; we need to clear the TS bit in CR0 to execute
+; x87 FPU/MMX/SSE/SSE2/SSE3/SSSE3/SSE4 instructions.
+;
+ clts
+ iretd
+
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c
new file mode 100644
index 00000000..b7f4949c
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c
@@ -0,0 +1,204 @@
+/** @file
+ SMM CPU misc functions for Ia32 arch specific.
+
+Copyright (c) 2015 - 2019, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+extern UINT64 gTaskGateDescriptor;
+
+EFI_PHYSICAL_ADDRESS mGdtBuffer;
+UINTN mGdtBufferSize;
+
+extern BOOLEAN mCetSupported;
+extern UINTN mSmmShadowStackSize;
+
+X86_ASSEMBLY_PATCH_LABEL mPatchCetPl0Ssp;
+X86_ASSEMBLY_PATCH_LABEL mPatchCetInterruptSsp;
+UINT32 mCetPl0Ssp;
+UINT32 mCetInterruptSsp;
+
+/**
+ Initialize IDT for SMM Stack Guard.
+
+**/
+VOID
+EFIAPI
+InitializeIDTSmmStackGuard (
+ VOID
+ )
+{
+ IA32_IDT_GATE_DESCRIPTOR *IdtGate;
+
+ //
+ // If SMM Stack Guard feature is enabled, the Page Fault Exception entry in IDT
+ // is a Task Gate Descriptor so that when a Page Fault Exception occurs,
+  // the processors can use a known good stack in case the stack runs out.
+ //
+ IdtGate = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
+ IdtGate += EXCEPT_IA32_PAGE_FAULT;
+ IdtGate->Uint64 = gTaskGateDescriptor;
+}
+
+/**
+ Initialize Gdt for all processors.
+
+ @param[in] Cr3 CR3 value.
+ @param[out] GdtStepSize The step size for GDT table.
+
+ @return GdtBase for processor 0.
+ GdtBase for processor X is: GdtBase + (GdtStepSize * X)
+**/
+VOID *
+InitGdt (
+ IN UINTN Cr3,
+ OUT UINTN *GdtStepSize
+ )
+{
+ UINTN Index;
+ IA32_SEGMENT_DESCRIPTOR *GdtDescriptor;
+ UINTN TssBase;
+ UINTN GdtTssTableSize;
+ UINT8 *GdtTssTables;
+ UINTN GdtTableStepSize;
+ UINTN InterruptShadowStack;
+
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ //
+    // For IA32 SMM, if SMM Stack Guard feature is enabled, we use 2 TSSs.
+    // In this case, we allocate a separate GDT/TSS for each CPU to avoid TSS load contention
+    // on each SMI entry.
+ //
+
+ //
+ // Enlarge GDT to contain 2 TSS descriptors
+ //
+ gcSmiGdtr.Limit += (UINT16)(2 * sizeof (IA32_SEGMENT_DESCRIPTOR));
+
+ GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE + EXCEPTION_TSS_SIZE + 7) & ~7; // 8 bytes aligned
+ mGdtBufferSize = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
+ //
+    // IA32 Stack Guard needs a task switch to change stacks, which requires
+    // writing the GDT and TSS, so AllocateCodePages() cannot be used here
+    // as code pages will be set to RO.
+ //
+ GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
+ ASSERT (GdtTssTables != NULL);
+ mGdtBuffer = (UINTN)GdtTssTables;
+ GdtTableStepSize = GdtTssTableSize;
+
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
+ CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE + EXCEPTION_TSS_SIZE);
+ //
+ // Fixup TSS descriptors
+ //
+ TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);
+ GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;
+ GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;
+ GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);
+ GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);
+
+ TssBase += TSS_SIZE;
+ GdtDescriptor++;
+ GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;
+ GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);
+ GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);
+ //
+ // Fixup TSS segments
+ //
+ // ESP as known good stack
+ //
+ *(UINTN *)(TssBase + TSS_IA32_ESP_OFFSET) = mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize;
+ *(UINT32 *)(TssBase + TSS_IA32_CR3_OFFSET) = Cr3;
+
+ //
+ // Setup ShadowStack for stack switch
+ //
+ if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
+ InterruptShadowStack = (UINTN)(mSmmStackArrayBase + mSmmStackSize + EFI_PAGES_TO_SIZE (1) - sizeof(UINT64) + (mSmmStackSize + mSmmShadowStackSize) * Index);
+ *(UINT32 *)(TssBase + TSS_IA32_SSP_OFFSET) = (UINT32)InterruptShadowStack;
+ }
+ }
+ } else {
+ //
+    // Just use the original table; allocate pages and copy it here to make sure GDTs are covered by page memory.
+ //
+ GdtTssTableSize = gcSmiGdtr.Limit + 1;
+ mGdtBufferSize = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
+ GdtTssTables = (UINT8*)AllocateCodePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
+ ASSERT (GdtTssTables != NULL);
+ mGdtBuffer = (UINTN)GdtTssTables;
+ GdtTableStepSize = GdtTssTableSize;
+
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
+ CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1);
+ }
+ }
+
+ *GdtStepSize = GdtTableStepSize;
+ return GdtTssTables;
+}
+
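+//
+// Editorial sketch (not upstream code; SetDescriptorBase is a hypothetical
+// helper): the TSS fixups in InitGdt () scatter the 32-bit TSS base across
+// three fields of each IA32_SEGMENT_DESCRIPTOR:
+//
+//   VOID
+//   SetDescriptorBase (
+//     IN OUT IA32_SEGMENT_DESCRIPTOR  *Desc,
+//     IN     UINTN                    Base
+//     )
+//   {
+//     Desc->Bits.BaseLow  = (UINT16)Base;         // bits  0..15
+//     Desc->Bits.BaseMid  = (UINT8)(Base >> 16);  // bits 16..23
+//     Desc->Bits.BaseHigh = (UINT8)(Base >> 24);  // bits 24..31
+//   }
+//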
+/**
+  Transfer the AP to a safe hlt-loop after it finishes restoring CPU features on the S3 path.
+
+ @param[in] ApHltLoopCode The address of the safe hlt-loop function.
+ @param[in] TopOfStack A pointer to the new stack to use for the ApHltLoopCode.
+ @param[in] NumberToFinishAddress Address of Semaphore of APs finish count.
+
+**/
+VOID
+TransferApToSafeState (
+ IN UINTN ApHltLoopCode,
+ IN UINTN TopOfStack,
+ IN UINTN NumberToFinishAddress
+ )
+{
+ SwitchStack (
+ (SWITCH_STACK_ENTRY_POINT)ApHltLoopCode,
+ (VOID *)NumberToFinishAddress,
+ NULL,
+ (VOID *)TopOfStack
+ );
+ //
+ // It should never reach here
+ //
+ ASSERT (FALSE);
+}
+
+/**
+ Initialize the shadow stack related data structure.
+
+ @param CpuIndex The index of CPU.
+ @param ShadowStack The bottom of the shadow stack for this CPU.
+**/
+VOID
+InitShadowStack (
+ IN UINTN CpuIndex,
+ IN VOID *ShadowStack
+ )
+{
+ UINTN SmmShadowStackSize;
+
+ if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
+ SmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ SmmShadowStackSize += EFI_PAGES_TO_SIZE (2);
+ }
+ mCetPl0Ssp = (UINT32)((UINTN)ShadowStack + SmmShadowStackSize - sizeof(UINT64));
+ PatchInstructionX86 (mPatchCetPl0Ssp, mCetPl0Ssp, 4);
+ DEBUG ((DEBUG_INFO, "mCetPl0Ssp - 0x%x\n", mCetPl0Ssp));
+ DEBUG ((DEBUG_INFO, "ShadowStack - 0x%x\n", ShadowStack));
+ DEBUG ((DEBUG_INFO, " SmmShadowStackSize - 0x%x\n", SmmShadowStackSize));
+
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ mCetInterruptSsp = (UINT32)((UINTN)ShadowStack + EFI_PAGES_TO_SIZE(1) - sizeof(UINT64));
+ PatchInstructionX86 (mPatchCetInterruptSsp, mCetInterruptSsp, 4);
+ DEBUG ((DEBUG_INFO, "mCetInterruptSsp - 0x%x\n", mCetInterruptSsp));
+ }
+ }
+}
+
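+//
+// Editorial note (not upstream code): with stack guard enabled, the per-CPU
+// shadow stack region set up above is laid out roughly as follows (higher
+// addresses at the top):
+//
+//   ShadowStack + SmmShadowStackSize - 8  <- mCetPl0Ssp (PL0 SSP token)
+//   ...                                      regular shadow stack pages
+//   ShadowStack + SIZE_4KB - 8            <- mCetInterruptSsp (token for the
+//                                            exception shadow stack)
+//   ShadowStack                           <- base of the allocation
+//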
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.nasm b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.nasm
new file mode 100644
index 00000000..69291ec1
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.nasm
@@ -0,0 +1,96 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+; Module Name:
+;
+; SmmInit.nasm
+;
+; Abstract:
+;
+; Functions for relocating SMBASE's for all processors
+;
+;-------------------------------------------------------------------------------
+
+%include "StuffRsbNasm.inc"
+
+extern ASM_PFX(SmmInitHandler)
+extern ASM_PFX(mRebasedFlag)
+extern ASM_PFX(mSmmRelocationOriginalAddress)
+
+global ASM_PFX(gPatchSmmCr3)
+global ASM_PFX(gPatchSmmCr4)
+global ASM_PFX(gPatchSmmCr0)
+global ASM_PFX(gPatchSmmInitStack)
+global ASM_PFX(gcSmiInitGdtr)
+global ASM_PFX(gcSmmInitSize)
+global ASM_PFX(gcSmmInitTemplate)
+
+%define PROTECT_MODE_CS 0x8
+%define PROTECT_MODE_DS 0x20
+
+ SECTION .text
+
+ASM_PFX(gcSmiInitGdtr):
+ DW 0
+ DQ 0
+
+global ASM_PFX(SmmStartup)
+
+BITS 16
+ASM_PFX(SmmStartup):
+ mov eax, 0x80000001 ; read capability
+ cpuid
+ mov ebx, edx ; rdmsr will change edx. keep it in ebx.
+ and ebx, BIT20 ; extract NX capability bit
+ shr ebx, 9 ; shift bit to IA32_EFER.NXE[BIT11] position
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmmCr3):
+ mov cr3, eax
+o32 lgdt [cs:ebp + (ASM_PFX(gcSmiInitGdtr) - ASM_PFX(SmmStartup))]
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmmCr4):
+ mov cr4, eax
+ mov ecx, 0xc0000080 ; IA32_EFER MSR
+ rdmsr
+ or eax, ebx ; set NXE bit if NX is available
+ wrmsr
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmmCr0):
+ mov di, PROTECT_MODE_DS
+ mov cr0, eax
+ jmp PROTECT_MODE_CS : dword @32bit
+
+BITS 32
+@32bit:
+ mov ds, edi
+ mov es, edi
+ mov fs, edi
+ mov gs, edi
+ mov ss, edi
+ mov esp, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmmInitStack):
+ call ASM_PFX(SmmInitHandler)
+ StuffRsb32
+ rsm
+
+BITS 16
+ASM_PFX(gcSmmInitTemplate):
+ mov ebp, ASM_PFX(SmmStartup)
+ sub ebp, 0x30000
+ jmp ebp
+
+ASM_PFX(gcSmmInitSize): DW $ - ASM_PFX(gcSmmInitTemplate)
+
+BITS 32
+global ASM_PFX(SmmRelocationSemaphoreComplete)
+ASM_PFX(SmmRelocationSemaphoreComplete):
+ push eax
+ mov eax, [ASM_PFX(mRebasedFlag)]
+ mov byte [eax], 1
+ pop eax
+ jmp [ASM_PFX(mSmmRelocationOriginalAddress)]
+
+global ASM_PFX(PiSmmCpuSmmInitFixupAddress)
+ASM_PFX(PiSmmCpuSmmInitFixupAddress):
+ ret
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c
new file mode 100644
index 00000000..67ad5ace
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c
@@ -0,0 +1,74 @@
+/** @file
+IA-32 processor specific functions to enable SMM profile.
+
+Copyright (c) 2012 - 2016, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+#include "SmmProfileInternal.h"
+
+/**
+ Create SMM page table for S3 path.
+
+**/
+VOID
+InitSmmS3Cr3 (
+ VOID
+ )
+{
+ mSmmS3ResumeState->SmmS3Cr3 = Gen4GPageTable (TRUE);
+
+ return ;
+}
+
+/**
+  Allocate pages for creating 4KB pages based on 2MB pages when a page fault happens.
+ 32-bit firmware does not need it.
+
+**/
+VOID
+InitPagesForPFHandler (
+ VOID
+ )
+{
+}
+
+/**
+  Update the page table to map the memory correctly in order to make the instruction
+  which caused the page fault execute successfully. It also saves the original page
+  table to be restored in the single-step exception. 32-bit firmware does not need it.
+
+  @param PageTable PageTable Address.
+  @param PFAddress The memory address which caused the page fault exception.
+  @param CpuIndex The index of the processor.
+  @param ErrorCode The Error code of the exception.
+  @param IsValidPFAddress The flag indicates if SMM profile data needs to be added.
+
+**/
+VOID
+RestorePageTableAbove4G (
+ UINT64 *PageTable,
+ UINT64 PFAddress,
+ UINTN CpuIndex,
+ UINTN ErrorCode,
+ BOOLEAN *IsValidPFAddress
+ )
+{
+}
+
+/**
+ Clear TF in FLAGS.
+
+ @param SystemContext A pointer to the processor context when
+ the interrupt occurred on the processor.
+
+**/
+VOID
+ClearTrapFlag (
+ IN OUT EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ SystemContext.SystemContextIa32->Eflags &= (UINTN) ~BIT8;
+}
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.h b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.h
new file mode 100644
index 00000000..69f43116
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.h
@@ -0,0 +1,91 @@
+/** @file
+IA-32 processor specific header file to enable SMM profile.
+
+Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef _SMM_PROFILE_ARCH_H_
+#define _SMM_PROFILE_ARCH_H_
+
+#pragma pack (1)
+
+typedef struct _MSR_DS_AREA_STRUCT {
+ UINT32 BTSBufferBase;
+ UINT32 BTSIndex;
+ UINT32 BTSAbsoluteMaximum;
+ UINT32 BTSInterruptThreshold;
+ UINT32 PEBSBufferBase;
+ UINT32 PEBSIndex;
+ UINT32 PEBSAbsoluteMaximum;
+ UINT32 PEBSInterruptThreshold;
+ UINT32 PEBSCounterReset[4];
+ UINT32 Reserved;
+} MSR_DS_AREA_STRUCT;
+
+typedef struct _BRANCH_TRACE_RECORD {
+ UINT32 LastBranchFrom;
+ UINT32 LastBranchTo;
+ UINT32 Rsvd0 : 4;
+ UINT32 BranchPredicted : 1;
+ UINT32 Rsvd1 : 27;
+} BRANCH_TRACE_RECORD;
+
+typedef struct _PEBS_RECORD {
+ UINT32 Eflags;
+ UINT32 LinearIP;
+ UINT32 Eax;
+ UINT32 Ebx;
+ UINT32 Ecx;
+ UINT32 Edx;
+ UINT32 Esi;
+ UINT32 Edi;
+ UINT32 Ebp;
+ UINT32 Esp;
+} PEBS_RECORD;
+
+#pragma pack ()
+
+#define PHYSICAL_ADDRESS_MASK ((1ull << 32) - SIZE_4KB)
+
+/**
+  Update the page table to map the memory correctly in order to make the instruction
+  which caused the page fault execute successfully. It also saves the original page
+  table to be restored in the single-step exception. 32-bit firmware does not need it.
+
+  @param PageTable PageTable Address.
+  @param PFAddress The memory address which caused the page fault exception.
+  @param CpuIndex The index of the processor.
+  @param ErrorCode The Error code of the exception.
+  @param IsValidPFAddress The flag indicates if SMM profile data needs to be added.
+
+**/
+VOID
+RestorePageTableAbove4G (
+ UINT64 *PageTable,
+ UINT64 PFAddress,
+ UINTN CpuIndex,
+ UINTN ErrorCode,
+ BOOLEAN *IsValidPFAddress
+ );
+
+/**
+ Create SMM page table for S3 path.
+
+**/
+VOID
+InitSmmS3Cr3 (
+ VOID
+ );
+
+/**
+  Allocate pages for creating 4KB pages based on 2MB pages when a page fault happens.
+
+**/
+VOID
+InitPagesForPFHandler (
+ VOID
+ );
+
+#endif // _SMM_PROFILE_ARCH_H_
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
new file mode 100644
index 00000000..ae8c90be
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
@@ -0,0 +1,2027 @@
+/** @file
+SMM MP service implementation
+
+Copyright (c) 2009 - 2021, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
+
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+//
+// Slots for all MTRRs (FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE)
+//
+MTRR_SETTINGS gSmiMtrrs;
+UINT64 gPhyMask;
+SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL;
+UINTN mSmmMpSyncDataSize;
+SMM_CPU_SEMAPHORES mSmmCpuSemaphores;
+UINTN mSemaphoreSize;
+SPIN_LOCK *mPFLock = NULL;
+SMM_CPU_SYNC_MODE mCpuSmmSyncMode;
+BOOLEAN mMachineCheckSupported = FALSE;
+MM_COMPLETION mSmmStartupThisApToken;
+
+extern UINTN mSmmShadowStackSize;
+
+/**
+ Performs an atomic compare exchange operation to get semaphore.
+ The compare exchange operation must be performed using
+ MP safe mechanisms.
+
+ @param Sem IN: 32-bit unsigned integer
+ OUT: original integer - 1
+ @return Original integer - 1
+
+**/
+UINT32
+WaitForSemaphore (
+ IN OUT volatile UINT32 *Sem
+ )
+{
+ UINT32 Value;
+
+ for (;;) {
+ Value = *Sem;
+ if (Value != 0 &&
+ InterlockedCompareExchange32 (
+ (UINT32*)Sem,
+ Value,
+ Value - 1
+ ) == Value) {
+ break;
+ }
+ CpuPause ();
+ }
+ return Value - 1;
+}
+
+/**
+ Performs an atomic compare exchange operation to release semaphore.
+ The compare exchange operation must be performed using
+ MP safe mechanisms.
+
+ @param Sem IN: 32-bit unsigned integer
+ OUT: original integer + 1
+ @return Original integer + 1
+
+**/
+UINT32
+ReleaseSemaphore (
+ IN OUT volatile UINT32 *Sem
+ )
+{
+ UINT32 Value;
+
+ do {
+ Value = *Sem;
+ } while (Value + 1 != 0 &&
+ InterlockedCompareExchange32 (
+ (UINT32*)Sem,
+ Value,
+ Value + 1
+ ) != Value);
+ return Value + 1;
+}
+
+/**
+ Performs an atomic compare exchange operation to lock semaphore.
+ The compare exchange operation must be performed using
+ MP safe mechanisms.
+
+ @param Sem IN: 32-bit unsigned integer
+ OUT: -1
+ @return Original integer
+
+**/
+UINT32
+LockdownSemaphore (
+ IN OUT volatile UINT32 *Sem
+ )
+{
+ UINT32 Value;
+
+ do {
+ Value = *Sem;
+ } while (InterlockedCompareExchange32 (
+ (UINT32*)Sem,
+ Value, (UINT32)-1
+ ) != Value);
+ return Value;
+}
+
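+//
+// Editorial note (not upstream code): together, the three primitives above
+// turn *mSmmMpSyncData->Counter into a counting semaphore that doubles as an
+// arrival counter. A hedged sketch of the BSP-side pattern used later in
+// this file:
+//
+//   *mSmmMpSyncData->AllCpusInSync = TRUE;
+//   ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
+//
+// Locking the counter down at (UINT32)-1 gives the BSP a stable AP count for
+// the rest of the SMI round.
+//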
+/**
+  Wait for all APs to perform an atomic compare exchange operation to release the semaphore.
+
+ @param NumberOfAPs AP number
+
+**/
+VOID
+WaitForAllAPs (
+ IN UINTN NumberOfAPs
+ )
+{
+ UINTN BspIndex;
+
+ BspIndex = mSmmMpSyncData->BspIndex;
+ while (NumberOfAPs-- > 0) {
+ WaitForSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
+ }
+}
+
+/**
+ Performs an atomic compare exchange operation to release semaphore
+ for each AP.
+
+**/
+VOID
+ReleaseAllAPs (
+ VOID
+ )
+{
+ UINTN Index;
+
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ if (IsPresentAp (Index)) {
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[Index].Run);
+ }
+ }
+}
+
+/**
+ Checks if all CPUs (with certain exceptions) have checked in for this SMI run
+
+ @param Exceptions CPU Arrival exception flags.
+
+  @retval TRUE if all CPUs have checked in.
+ @retval FALSE if at least one Normal AP hasn't checked in.
+
+**/
+BOOLEAN
+AllCpusInSmmWithExceptions (
+ SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions
+ )
+{
+ UINTN Index;
+ SMM_CPU_DATA_BLOCK *CpuData;
+ EFI_PROCESSOR_INFORMATION *ProcessorInfo;
+
+ ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
+
+ if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
+ return TRUE;
+ }
+
+ CpuData = mSmmMpSyncData->CpuData;
+ ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ if (!(*(CpuData[Index].Present)) && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
+ if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) {
+ continue;
+ }
+ if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) {
+ continue;
+ }
+ if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) {
+ continue;
+ }
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+/**
+  Check whether the OS has enabled LMCE in MSR_IA32_MCG_EXT_CTL.
+
+  @retval TRUE  The OS has enabled LMCE.
+  @retval FALSE The OS has not enabled LMCE.
+
+**/
+BOOLEAN
+IsLmceOsEnabled (
+ VOID
+ )
+{
+ MSR_IA32_MCG_CAP_REGISTER McgCap;
+ MSR_IA32_FEATURE_CONTROL_REGISTER FeatureCtrl;
+ MSR_IA32_MCG_EXT_CTL_REGISTER McgExtCtrl;
+
+ McgCap.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_CAP);
+ if (McgCap.Bits.MCG_LMCE_P == 0) {
+ return FALSE;
+ }
+
+ FeatureCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_FEATURE_CONTROL);
+ if (FeatureCtrl.Bits.LmceOn == 0) {
+ return FALSE;
+ }
+
+ McgExtCtrl.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_EXT_CTL);
+ return (BOOLEAN) (McgExtCtrl.Bits.LMCE_EN == 1);
+}
+
+/**
+  Return whether a local machine check exception (LMCE) was signaled.
+
+  MCG_STATUS.LMCE_S indicates (when set) that a local machine check exception was generated and that the current machine-check event was
+  delivered to only this logical processor.
+
+ @retval TRUE LMCE was signaled.
+ @retval FALSE LMCE was not signaled.
+
+**/
+BOOLEAN
+IsLmceSignaled (
+ VOID
+ )
+{
+ MSR_IA32_MCG_STATUS_REGISTER McgStatus;
+
+ McgStatus.Uint64 = AsmReadMsr64 (MSR_IA32_MCG_STATUS);
+ return (BOOLEAN) (McgStatus.Bits.LMCE_S == 1);
+}
+
+/**
+  Given the timeout constraint, wait for all APs to arrive, and ensure that when this function returns, no AP will execute normal mode code before
+  entering SMM, except SMI disabled APs.
+
+**/
+VOID
+SmmWaitForApArrival (
+ VOID
+ )
+{
+ UINT64 Timer;
+ UINTN Index;
+ BOOLEAN LmceEn;
+ BOOLEAN LmceSignal;
+
+ ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
+
+ LmceEn = FALSE;
+ LmceSignal = FALSE;
+ if (mMachineCheckSupported) {
+ LmceEn = IsLmceOsEnabled ();
+ LmceSignal = IsLmceSignaled();
+ }
+
+ //
+ // Platform implementor should choose a timeout value appropriately:
+  // - The timeout value should balance the SMM time constraints and the likelihood that delayed CPUs are excluded in the SMM run. Note
+  //   the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run.
+  // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI
+  //   and either enter SMM or buffer the SMI, to ensure there is no CPU running normal mode code when SMI handling starts. This will
+  //   be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the
+  //   SMI IPI), because with a buffered SMI, a CPU will enter SMM immediately after it is brought out of the blocked state.
+  // - The timeout value must be longer than the longest possible IO operation in the system
+ //
+
+ //
+ // Sync with APs 1st timeout
+ //
+ for (Timer = StartSyncTimer ();
+ !IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal) &&
+ !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
+ ) {
+ CpuPause ();
+ }
+
+ //
+  // Not all APs have arrived, so we need 2nd round of timeout. IPIs should be sent to ALL non-present APs,
+ // because:
+ // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running
+  //    normal mode code. These APs need to be guaranteed to have an SMI pending to ensure that once they are out of delayed / blocked state, they
+ // enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode
+ // work while SMI handling is on-going.
+ // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run.
+ // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state
+ // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal
+ // mode work while SMI handling is on-going.
+ // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because:
+ // - In traditional flow, SMI disabling is discouraged.
+ // - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function.
+ // In both cases, adding SMI-disabling checking code increases overhead.
+ //
+ if (*mSmmMpSyncData->Counter < mNumberOfCpus) {
+ //
+ // Send SMI IPIs to bring outside processors in
+ //
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ if (!(*(mSmmMpSyncData->CpuData[Index].Present)) && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) {
+ SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
+ }
+ }
+
+ //
+ // Sync with APs 2nd timeout.
+ //
+ for (Timer = StartSyncTimer ();
+ !IsSyncTimerTimeout (Timer) &&
+ !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED );
+ ) {
+ CpuPause ();
+ }
+ }
+
+ return;
+}
+
+/**
+ Replace OS MTRR's with SMI MTRR's.
+
+ @param CpuIndex Processor Index
+
+**/
+VOID
+ReplaceOSMtrrs (
+ IN UINTN CpuIndex
+ )
+{
+ SmmCpuFeaturesDisableSmrr ();
+
+ //
+ // Replace all MTRRs registers
+ //
+ MtrrSetAllMtrrs (&gSmiMtrrs);
+}
+
+/**
+  Check whether the task has been finished by all APs.
+
+  @param BlockMode Whether to wait in blocking mode or non-blocking mode.
+
+  @retval TRUE  The task has been finished by all APs.
+  @retval FALSE The task has not been finished by all APs.
+
+**/
+BOOLEAN
+WaitForAllAPsNotBusy (
+ IN BOOLEAN BlockMode
+ )
+{
+ UINTN Index;
+
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ //
+    // Ignore the BSP and APs which have not called in to SMM.
+ //
+ if (!IsPresentAp(Index)) {
+ continue;
+ }
+
+ if (BlockMode) {
+ AcquireSpinLock(mSmmMpSyncData->CpuData[Index].Busy);
+ ReleaseSpinLock(mSmmMpSyncData->CpuData[Index].Busy);
+ } else {
+ if (AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[Index].Busy)) {
+ ReleaseSpinLock(mSmmMpSyncData->CpuData[Index].Busy);
+ } else {
+ return FALSE;
+ }
+ }
+ }
+
+ return TRUE;
+}
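+
+//
+// The Busy-lock probe above relies on a simple spin lock idiom: an AP holds
+// its Busy lock for as long as it runs a scheduled procedure, so a waiter can
+// detect completion by acquiring and immediately releasing that same lock.
+// A minimal sketch of the idiom, using a hypothetical helper name for
+// illustration only:
+//
+//   BOOLEAN
+//   IsApIdle (
+//     IN SPIN_LOCK  *Busy
+//     )
+//   {
+//     if (AcquireSpinLockOrFail (Busy)) {   // Succeeds only when the AP is idle
+//       ReleaseSpinLock (Busy);             // Give the lock back immediately
+//       return TRUE;
+//     }
+//     return FALSE;                         // AP is still running its procedure
+//   }
+//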
+
+/**
+ Check whether it is a present AP.
+
+ @param CpuIndex The AP index which calls this function.
+
+ @retval TRUE It's a present AP.
+ @retval FALSE This is not an AP or it is not present.
+
+
+**/
+BOOLEAN
+IsPresentAp (
+ IN UINTN CpuIndex
+ )
+{
+ return ((CpuIndex != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) &&
+ *(mSmmMpSyncData->CpuData[CpuIndex].Present));
+}
+
+/**
+ Clean up the status flags used while executing the procedure.
+
+ @param CpuIndex The AP index which calls this function.
+
+**/
+VOID
+ReleaseToken (
+ IN UINTN CpuIndex
+ )
+{
+ PROCEDURE_TOKEN *Token;
+
+ Token = mSmmMpSyncData->CpuData[CpuIndex].Token;
+
+ if (InterlockedDecrement (&Token->RunningApCount) == 0) {
+ ReleaseSpinLock (Token->SpinLock);
+ }
+
+ mSmmMpSyncData->CpuData[CpuIndex].Token = NULL;
+}
+
+/**
+ Free the tokens in the maintained list.
+
+**/
+VOID
+ResetTokens (
+ VOID
+ )
+{
+ //
+ // Reset the FirstFreeToken to the beginning of token list upon exiting SMI.
+ //
+ gSmmCpuPrivate->FirstFreeToken = GetFirstNode (&gSmmCpuPrivate->TokenList);
+}
+
+/**
+ SMI handler for BSP.
+
+ @param CpuIndex BSP processor Index
+ @param SyncMode SMM MP sync mode
+
+**/
+VOID
+BSPHandler (
+ IN UINTN CpuIndex,
+ IN SMM_CPU_SYNC_MODE SyncMode
+ )
+{
+ UINTN Index;
+ MTRR_SETTINGS Mtrrs;
+ UINTN ApCount;
+ BOOLEAN ClearTopLevelSmiResult;
+ UINTN PresentCount;
+
+ ASSERT (CpuIndex == mSmmMpSyncData->BspIndex);
+ ApCount = 0;
+
+ //
+ // Flag BSP's presence
+ //
+ *mSmmMpSyncData->InsideSmm = TRUE;
+
+ //
+ // Initialize Debug Agent to start source level debug in BSP handler
+ //
+ InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL);
+
+ //
+ // Mark this processor's presence
+ //
+ *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;
+
+ //
+ // Clear platform top level SMI status bit before calling SMI handlers. If
+ // we cleared it after SMI handlers are run, we would miss the SMI that
+ // occurs after SMI handlers are done and before SMI status bit is cleared.
+ //
+ ClearTopLevelSmiResult = ClearTopLevelSmiStatus();
+ ASSERT (ClearTopLevelSmiResult == TRUE);
+
+ //
+ // Set running processor index
+ //
+ gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex;
+
+ //
+ // If in Traditional Sync Mode, or if MTRRs need to be configured: gather all available APs.
+ //
+ if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
+
+ //
+ // Wait for APs to arrive
+ //
+ SmmWaitForApArrival();
+
+ //
+ // Lock the counter down and retrieve the number of APs
+ //
+ *mSmmMpSyncData->AllCpusInSync = TRUE;
+ ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
+
+ //
+ // Wait for all APs to get ready for programming MTRRs
+ //
+ WaitForAllAPs (ApCount);
+
+ if (SmmCpuFeaturesNeedConfigureMtrrs()) {
+ //
+ // Signal all APs it's time for backup MTRRs
+ //
+ ReleaseAllAPs ();
+
+ //
+ // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
+ // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
+ // to a large enough value to avoid this situation.
+ // Note: For HT capable CPUs, threads within a core share the same set of MTRRs.
+ // We do the backup first and then set MTRR to avoid race condition for threads
+ // in the same core.
+ //
+ MtrrGetAllMtrrs(&Mtrrs);
+
+ //
+ // Wait for all APs to complete their MTRR saving
+ //
+ WaitForAllAPs (ApCount);
+
+ //
+ // Let all processors program SMM MTRRs together
+ //
+ ReleaseAllAPs ();
+
+ //
+ // WaitForSemaphore() may wait forever if an AP happens to enter SMM at
+ // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set
+ // to a large enough value to avoid this situation.
+ //
+ ReplaceOSMtrrs (CpuIndex);
+
+ //
+ // Wait for all APs to complete their MTRR programming
+ //
+ WaitForAllAPs (ApCount);
+ }
+ }
+
+ //
+ // The BUSY lock is initialized to Acquired state
+ //
+ AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
+
+ //
+ // Perform the pre tasks
+ //
+ PerformPreTasks ();
+
+ //
+ // Invoke SMM Foundation EntryPoint with the processor information context.
+ //
+ gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext);
+
+ //
+ // Make sure all APs have completed their pending non-blocking tasks
+ //
+ WaitForAllAPsNotBusy (TRUE);
+
+ //
+ // Perform the remaining tasks
+ //
+ PerformRemainingTasks ();
+
+ //
+ // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and
+ // make those APs exit SMI synchronously. APs which arrive later will be excluded and
+ // will run through freely.
+ //
+ if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) {
+
+ //
+ // Lock the counter down and retrieve the number of APs
+ //
+ *mSmmMpSyncData->AllCpusInSync = TRUE;
+ ApCount = LockdownSemaphore (mSmmMpSyncData->Counter) - 1;
+ //
+ // Make sure all APs have their Present flag set
+ //
+ while (TRUE) {
+ PresentCount = 0;
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ if (*(mSmmMpSyncData->CpuData[Index].Present)) {
+ PresentCount ++;
+ }
+ }
+ if (PresentCount > ApCount) {
+ break;
+ }
+ }
+ }
+
+ //
+ // Notify all APs to exit
+ //
+ *mSmmMpSyncData->InsideSmm = FALSE;
+ ReleaseAllAPs ();
+
+ //
+ // Wait for all APs to complete their pending tasks
+ //
+ WaitForAllAPs (ApCount);
+
+ if (SmmCpuFeaturesNeedConfigureMtrrs()) {
+ //
+ // Signal APs to restore MTRRs
+ //
+ ReleaseAllAPs ();
+
+ //
+ // Restore OS MTRRs
+ //
+ SmmCpuFeaturesReenableSmrr ();
+ MtrrSetAllMtrrs(&Mtrrs);
+
+ //
+ // Wait for all APs to complete MTRR programming
+ //
+ WaitForAllAPs (ApCount);
+ }
+
+ //
+ // Stop source level debug in BSP handler, the code below will not be
+ // debugged.
+ //
+ InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL);
+
+ //
+ // Signal APs to Reset states/semaphore for this processor
+ //
+ ReleaseAllAPs ();
+
+ //
+ // Perform pending operations for hot-plug
+ //
+ SmmCpuUpdate ();
+
+ //
+ // Clear the Present flag of BSP
+ //
+ *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
+
+ //
+ // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but
+ // WaitForAllAps does not depend on the Present flag.
+ //
+ WaitForAllAPs (ApCount);
+
+ //
+ // Reset the tokens buffer.
+ //
+ ResetTokens ();
+
+ //
+ // Reset BspIndex to -1, meaning BSP has not been elected.
+ //
+ if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
+ mSmmMpSyncData->BspIndex = (UINT32)-1;
+ }
+
+ //
+ // Allow APs to check in from this point on
+ //
+ *mSmmMpSyncData->Counter = 0;
+ *mSmmMpSyncData->AllCpusInSync = FALSE;
+}
+
+/**
+ SMI handler for AP.
+
+ @param CpuIndex AP processor Index.
+ @param ValidSmi Indicates that current SMI is a valid SMI or not.
+ @param SyncMode SMM MP sync mode.
+
+**/
+VOID
+APHandler (
+ IN UINTN CpuIndex,
+ IN BOOLEAN ValidSmi,
+ IN SMM_CPU_SYNC_MODE SyncMode
+ )
+{
+ UINT64 Timer;
+ UINTN BspIndex;
+ MTRR_SETTINGS Mtrrs;
+ EFI_STATUS ProcedureStatus;
+
+ //
+ // Timeout BSP
+ //
+ for (Timer = StartSyncTimer ();
+ !IsSyncTimerTimeout (Timer) &&
+ !(*mSmmMpSyncData->InsideSmm);
+ ) {
+ CpuPause ();
+ }
+
+ if (!(*mSmmMpSyncData->InsideSmm)) {
+ //
+ // BSP timeout in the first round
+ //
+ if (mSmmMpSyncData->BspIndex != -1) {
+ //
+ // BSP Index is known
+ //
+ BspIndex = mSmmMpSyncData->BspIndex;
+ ASSERT (CpuIndex != BspIndex);
+
+ //
+ // Send SMI IPI to bring BSP in
+ //
+ SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId);
+
+ //
+ // Now wait for the BSP with a 2nd timeout
+ //
+ for (Timer = StartSyncTimer ();
+ !IsSyncTimerTimeout (Timer) &&
+ !(*mSmmMpSyncData->InsideSmm);
+ ) {
+ CpuPause ();
+ }
+
+ if (!(*mSmmMpSyncData->InsideSmm)) {
+ //
+ // Give up since BSP is unable to enter SMM
+ // and signal the completion of this AP
+ WaitForSemaphore (mSmmMpSyncData->Counter);
+ return;
+ }
+ } else {
+ //
+ // Don't know BSP index. Give up without sending IPI to BSP.
+ //
+ WaitForSemaphore (mSmmMpSyncData->Counter);
+ return;
+ }
+ }
+
+ //
+ // BSP is available
+ //
+ BspIndex = mSmmMpSyncData->BspIndex;
+ ASSERT (CpuIndex != BspIndex);
+
+ //
+ // Mark this processor's presence
+ //
+ *(mSmmMpSyncData->CpuData[CpuIndex].Present) = TRUE;
+
+ if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) {
+ //
+ // Notify BSP of arrival at this point
+ //
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
+ }
+
+ if (SmmCpuFeaturesNeedConfigureMtrrs()) {
+ //
+ // Wait for the signal from BSP to backup MTRRs
+ //
+ WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
+
+ //
+ // Backup OS MTRRs
+ //
+ MtrrGetAllMtrrs(&Mtrrs);
+
+ //
+ // Signal BSP the completion of this AP
+ //
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
+
+ //
+ // Wait for BSP's signal to program MTRRs
+ //
+ WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
+
+ //
+ // Replace OS MTRRs with SMI MTRRs
+ //
+ ReplaceOSMtrrs (CpuIndex);
+
+ //
+ // Signal BSP the completion of this AP
+ //
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
+ }
+
+ while (TRUE) {
+ //
+ // Wait for something to happen
+ //
+ WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
+
+ //
+ // Check if BSP wants to exit SMM
+ //
+ if (!(*mSmmMpSyncData->InsideSmm)) {
+ break;
+ }
+
+ //
+ // BUSY should be acquired by SmmStartupThisAp()
+ //
+ ASSERT (
+ !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)
+ );
+
+ //
+ // Invoke the scheduled procedure
+ //
+ ProcedureStatus = (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) (
+ (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter
+ );
+ if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
+ *mSmmMpSyncData->CpuData[CpuIndex].Status = ProcedureStatus;
+ }
+
+ if (mSmmMpSyncData->CpuData[CpuIndex].Token != NULL) {
+ ReleaseToken (CpuIndex);
+ }
+
+ //
+ // Release BUSY
+ //
+ ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
+ }
+
+ if (SmmCpuFeaturesNeedConfigureMtrrs()) {
+ //
+ // Notify BSP the readiness of this AP to program MTRRs
+ //
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
+
+ //
+ // Wait for the signal from BSP to program MTRRs
+ //
+ WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
+
+ //
+ // Restore OS MTRRs
+ //
+ SmmCpuFeaturesReenableSmrr ();
+ MtrrSetAllMtrrs(&Mtrrs);
+ }
+
+ //
+ // Notify BSP the readiness of this AP to Reset states/semaphore for this processor
+ //
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
+
+ //
+ // Wait for the signal from BSP to Reset states/semaphore for this processor
+ //
+ WaitForSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
+
+ //
+ // Reset states/semaphore for this processor
+ //
+ *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
+
+ //
+ // Notify BSP the readiness of this AP to exit SMM
+ //
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[BspIndex].Run);
+
+}
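+
+//
+// The BSP/AP handshake in BSPHandler()/APHandler() is built on per-CPU
+// counting semaphores: the BSP blocks on its own Run semaphore inside
+// WaitForAllAPs() while each AP blocks on CpuData[CpuIndex].Run. A condensed
+// view of one exchange (illustrative only, not additional code):
+//
+//   BSP                                    AP
+//   ReleaseAllAPs ()  ------------------>  WaitForSemaphore (Ap.Run) returns
+//                                          ... perform step, e.g. save MTRRs
+//   WaitForAllAPs ()  <------------------  ReleaseSemaphore (Bsp.Run)
+//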
+
+/**
+ Create a 4G page table in SMRAM.
+
+ @param[in] Is32BitPageTable Whether the page table is 32-bit PAE
+ @return PageTable Address
+
+**/
+UINT32
+Gen4GPageTable (
+ IN BOOLEAN Is32BitPageTable
+ )
+{
+ VOID *PageTable;
+ UINTN Index;
+ UINT64 *Pte;
+ UINTN PagesNeeded;
+ UINTN Low2MBoundary;
+ UINTN High2MBoundary;
+ UINTN Pages;
+ UINTN GuardPage;
+ UINT64 *Pdpte;
+ UINTN PageIndex;
+ UINTN PageAddress;
+
+ Low2MBoundary = 0;
+ High2MBoundary = 0;
+ PagesNeeded = 0;
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ //
+ // Add one more page for known good stack, then find the lower 2MB aligned address.
+ //
+ Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1);
+ //
+ // Add two more pages for known good stack and stack guard page,
+ // then find the lower 2MB aligned address.
+ //
+ High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize - mSmmShadowStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1);
+ PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1;
+ }
+ //
+ // Allocate the page table
+ //
+ PageTable = AllocatePageTableMemory (5 + PagesNeeded);
+ ASSERT (PageTable != NULL);
+
+ PageTable = (VOID *)((UINTN)PageTable);
+ Pte = (UINT64*)PageTable;
+
+ //
+ // Zero out all page table entries first
+ //
+ ZeroMem (Pte, EFI_PAGES_TO_SIZE (1));
+
+ //
+ // Set Page Directory Pointers
+ //
+ for (Index = 0; Index < 4; Index++) {
+ Pte[Index] = ((UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1)) | mAddressEncMask |
+ (Is32BitPageTable ? IA32_PAE_PDPTE_ATTRIBUTE_BITS : PAGE_ATTRIBUTE_BITS);
+ }
+ Pte += EFI_PAGE_SIZE / sizeof (*Pte);
+
+ //
+ // Fill in Page Directory Entries
+ //
+ for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) {
+ Pte[Index] = (Index << 21) | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
+ }
+
+ Pdpte = (UINT64*)PageTable;
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5);
+ GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE;
+ for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) {
+ Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
+ Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ //
+ // Fill in Page Table Entries
+ //
+ Pte = (UINT64*)Pages;
+ PageAddress = PageIndex;
+ for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
+ if (PageAddress == GuardPage) {
+ //
+ // Mark the guard page as non-present
+ //
+ Pte[Index] = PageAddress | mAddressEncMask;
+ GuardPage += (mSmmStackSize + mSmmShadowStackSize);
+ if (GuardPage > mSmmStackArrayEnd) {
+ GuardPage = 0;
+ }
+ } else {
+ Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ }
+ PageAddress+= EFI_PAGE_SIZE;
+ }
+ Pages += EFI_PAGE_SIZE;
+ }
+ }
+
+ if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0) {
+ Pte = (UINT64*)(UINTN)(Pdpte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
+ if ((Pte[0] & IA32_PG_PS) == 0) {
+ // 4K-page entries are already mapped. Just hide the first one anyway.
+ Pte = (UINT64*)(UINTN)(Pte[0] & ~mAddressEncMask & ~(EFI_PAGE_SIZE - 1));
+ Pte[0] &= ~(UINT64)IA32_PG_P; // Hide page 0
+ } else {
+ // Create 4K-page entries
+ Pages = (UINTN)AllocatePageTableMemory (1);
+ ASSERT (Pages != 0);
+
+ Pte[0] = (UINT64)(Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
+
+ Pte = (UINT64*)Pages;
+ PageAddress = 0;
+ Pte[0] = PageAddress | mAddressEncMask; // Hide page 0 by leaving it non-present; the entries below stay present
+ for (Index = 1; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) {
+ PageAddress += EFI_PAGE_SIZE;
+ Pte[Index] = PageAddress | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ }
+ }
+ }
+
+ return (UINT32)(UINTN)PageTable;
+}
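+
+//
+// Coverage check for the layout built above (PAE, identity mapped): page 0 of
+// the allocation holds the 4 PDPTEs, pages 1..4 hold 4 x 512 page-directory
+// entries, and each PDE maps one 2 MB page, so:
+//
+//   4 PDPTEs * 512 PDEs * SIZE_2MB == SIZE_4GB
+//
+// The (Index << 21) term in the PDE loop advances in 2 MB strides because
+// 1 << 21 == SIZE_2MB; the extra PagesNeeded pages are consumed only when
+// PcdCpuSmmStackGuard splits 2 MB pages into 4 KB pages around the stacks.
+//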
+
+/**
+ Checks whether the input token is a token currently in use.
+
+ @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
+ BroadcastProcedure.
+
+ @retval TRUE The input token is currently in use.
+ @retval FALSE The input token is not currently in use.
+**/
+BOOLEAN
+IsTokenInUse (
+ IN SPIN_LOCK *Token
+ )
+{
+ LIST_ENTRY *Link;
+ PROCEDURE_TOKEN *ProcToken;
+
+ if (Token == NULL) {
+ return FALSE;
+ }
+
+ Link = GetFirstNode (&gSmmCpuPrivate->TokenList);
+ //
+ // Only search used tokens.
+ //
+ while (Link != gSmmCpuPrivate->FirstFreeToken) {
+ ProcToken = PROCEDURE_TOKEN_FROM_LINK (Link);
+
+ if (ProcToken->SpinLock == Token) {
+ return TRUE;
+ }
+
+ Link = GetNextNode (&gSmmCpuPrivate->TokenList, Link);
+ }
+
+ return FALSE;
+}
+
+/**
+ Allocate buffer for the SPIN_LOCK and PROCEDURE_TOKEN.
+
+ @return First token of the token buffer.
+**/
+LIST_ENTRY *
+AllocateTokenBuffer (
+ VOID
+ )
+{
+ UINTN SpinLockSize;
+ UINT32 TokenCountPerChunk;
+ UINTN Index;
+ SPIN_LOCK *SpinLock;
+ UINT8 *SpinLockBuffer;
+ PROCEDURE_TOKEN *ProcTokens;
+
+ SpinLockSize = GetSpinLockProperties ();
+
+ TokenCountPerChunk = FixedPcdGet32 (PcdCpuSmmMpTokenCountPerChunk);
+ ASSERT (TokenCountPerChunk != 0);
+ if (TokenCountPerChunk == 0) {
+ DEBUG ((DEBUG_ERROR, "PcdCpuSmmMpTokenCountPerChunk should not be Zero!\n"));
+ CpuDeadLoop ();
+ }
+ DEBUG ((DEBUG_INFO, "CpuSmm: SpinLock Size = 0x%x, PcdCpuSmmMpTokenCountPerChunk = 0x%x\n", SpinLockSize, TokenCountPerChunk));
+
+ //
+ // Allocate the SPIN_LOCK and PROCEDURE_TOKEN buffers separately because of the alignment required by SPIN_LOCK.
+ //
+ SpinLockBuffer = AllocatePool (SpinLockSize * TokenCountPerChunk);
+ ASSERT (SpinLockBuffer != NULL);
+
+ ProcTokens = AllocatePool (sizeof (PROCEDURE_TOKEN) * TokenCountPerChunk);
+ ASSERT (ProcTokens != NULL);
+
+ for (Index = 0; Index < TokenCountPerChunk; Index++) {
+ SpinLock = (SPIN_LOCK *)(SpinLockBuffer + SpinLockSize * Index);
+ InitializeSpinLock (SpinLock);
+
+ ProcTokens[Index].Signature = PROCEDURE_TOKEN_SIGNATURE;
+ ProcTokens[Index].SpinLock = SpinLock;
+ ProcTokens[Index].RunningApCount = 0;
+
+ InsertTailList (&gSmmCpuPrivate->TokenList, &ProcTokens[Index].Link);
+ }
+
+ return &ProcTokens[0].Link;
+}
+
+/**
+ Get a free token.
+
+ If there is no free token, allocate a new token chunk, then return the first free one.
+
+ @param RunningApsCount The number of APs that will run with this token.
+
+ @return The first free PROCEDURE_TOKEN.
+
+**/
+PROCEDURE_TOKEN *
+GetFreeToken (
+ IN UINT32 RunningApsCount
+ )
+{
+ PROCEDURE_TOKEN *NewToken;
+
+ //
+ // If FirstFreeToken reaches the end of the token list, enlarge the token list.
+ // Set FirstFreeToken to the first free token.
+ //
+ if (gSmmCpuPrivate->FirstFreeToken == &gSmmCpuPrivate->TokenList) {
+ gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
+ }
+ NewToken = PROCEDURE_TOKEN_FROM_LINK (gSmmCpuPrivate->FirstFreeToken);
+ gSmmCpuPrivate->FirstFreeToken = GetNextNode (&gSmmCpuPrivate->TokenList, gSmmCpuPrivate->FirstFreeToken);
+
+ NewToken->RunningApCount = RunningApsCount;
+ AcquireSpinLock (NewToken->SpinLock);
+
+ return NewToken;
+}
+
+/**
+ Checks status of specified AP.
+
+ This function checks whether the specified AP has finished the task assigned
+ by StartupThisAP(), and whether timeout expires.
+
+ @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
+ BroadcastProcedure.
+
+ @retval EFI_SUCCESS Specified AP has finished the task assigned by StartupThisAP().
+ @retval EFI_NOT_READY Specified AP has not finished task and timeout has not expired.
+**/
+EFI_STATUS
+IsApReady (
+ IN SPIN_LOCK *Token
+ )
+{
+ if (AcquireSpinLockOrFail (Token)) {
+ ReleaseSpinLock (Token);
+ return EFI_SUCCESS;
+ }
+
+ return EFI_NOT_READY;
+}
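+
+//
+// Token lifecycle behind the AcquireSpinLockOrFail() probe above: the token's
+// SpinLock is acquired in GetFreeToken() while RunningApCount APs still have
+// work to do, and the last AP to finish releases it in ReleaseToken(). The
+// probe therefore succeeds exactly when every AP attached to the token is
+// done; ResetTokens() then recycles the whole list when the SMI exits.
+//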
+
+/**
+ Schedule a procedure to run on the specified CPU.
+
+ @param[in] Procedure The address of the procedure to run
+ @param[in] CpuIndex Target CPU Index
+ @param[in,out] ProcArguments The parameter to pass to the procedure
+ @param[in] Token This is an optional parameter that allows the caller to execute the
+ procedure in a blocking or non-blocking fashion. If it is NULL the
+ call is blocking, and the call will not return until the AP has
+ completed the procedure. If the token is not NULL, the call will
+ return immediately. The caller can check whether the procedure has
+ completed with CheckOnProcedure or WaitForProcedure.
+ @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for the APs to finish
+ execution of Procedure, either for blocking or non-blocking mode.
+ Zero means infinity. If the timeout expires before all APs return
+ from Procedure, then Procedure on the failed APs is terminated. If
+ the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
+ If the timeout expires in non-blocking mode, the timeout can be
+ determined through CheckOnProcedure or WaitForProcedure.
+ Note that timeout support is optional. Whether an implementation
+ supports this feature can be determined via the Attributes data
+ member.
+ @param[in,out] CpuStatus This optional pointer may be used to get the status code returned
+ by Procedure when it completes execution on the target AP, or with
+ EFI_TIMEOUT if the Procedure fails to complete within the optional
+ timeout. The implementation will update this variable with
+ EFI_NOT_READY prior to starting Procedure on the target AP.
+
+ @retval EFI_INVALID_PARAMETER CpuIndex not valid
+ @retval EFI_INVALID_PARAMETER CpuIndex specifying BSP
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
+ @retval EFI_SUCCESS The procedure has been successfully scheduled
+
+**/
+EFI_STATUS
+InternalSmmStartupThisAp (
+ IN EFI_AP_PROCEDURE2 Procedure,
+ IN UINTN CpuIndex,
+ IN OUT VOID *ProcArguments OPTIONAL,
+ IN MM_COMPLETION *Token,
+ IN UINTN TimeoutInMicroseconds,
+ IN OUT EFI_STATUS *CpuStatus
+ )
+{
+ PROCEDURE_TOKEN *ProcToken;
+
+ if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
+ DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
+ return EFI_INVALID_PARAMETER;
+ }
+ if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
+ DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
+ return EFI_INVALID_PARAMETER;
+ }
+ if (gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId == INVALID_APIC_ID) {
+ return EFI_INVALID_PARAMETER;
+ }
+ if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
+ if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
+ DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
+ }
+ return EFI_INVALID_PARAMETER;
+ }
+ if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
+ if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
+ DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
+ }
+ return EFI_INVALID_PARAMETER;
+ }
+ if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
+ return EFI_INVALID_PARAMETER;
+ }
+ if (Procedure == NULL) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
+
+ mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
+ mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
+ if (Token != NULL) {
+ if (Token != &mSmmStartupThisApToken) {
+ //
+ // When Token points to mSmmStartupThisApToken, this routine is called
+ // from SmmStartupThisAp() in non-blocking mode (PcdCpuSmmBlockStartupThisAp == FALSE).
+ //
+ // In this case, caller wants to startup AP procedure in non-blocking
+ // mode and cannot get the completion status from the Token because there
+ // is no way to return the Token to caller from SmmStartupThisAp().
+ // Caller needs to use its implementation specific way to query the completion status.
+ //
+ // There is no need to allocate a token in such a case, so these 3 overheads
+ // can be avoided:
+ // 1. Call AllocateTokenBuffer() when there is no free token.
+ // 2. Get a free token from the token buffer.
+ // 3. Call ReleaseToken() in APHandler().
+ //
+ ProcToken = GetFreeToken (1);
+ mSmmMpSyncData->CpuData[CpuIndex].Token = ProcToken;
+ *Token = (MM_COMPLETION)ProcToken->SpinLock;
+ }
+ }
+ mSmmMpSyncData->CpuData[CpuIndex].Status = CpuStatus;
+ if (mSmmMpSyncData->CpuData[CpuIndex].Status != NULL) {
+ *mSmmMpSyncData->CpuData[CpuIndex].Status = EFI_NOT_READY;
+ }
+
+ ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
+
+ if (Token == NULL) {
+ AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
+ ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
+ }
+
+ return EFI_SUCCESS;
+}
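+
+//
+// Hypothetical caller sketch (MyProcedure/MyArg are invented names, shown for
+// illustration only): run an EFI_AP_PROCEDURE2 on AP #1, first in blocking
+// mode, then in non-blocking mode with a pollable completion token.
+//
+//   EFI_STATUS     Status;
+//   EFI_STATUS     ApStatus;
+//   MM_COMPLETION  Token;
+//
+//   // Blocking: Token == NULL, returns after the AP finished MyProcedure.
+//   Status = InternalSmmStartupThisAp (MyProcedure, 1, MyArg, NULL, 0, &ApStatus);
+//
+//   // Non-blocking: returns immediately; poll the token for completion.
+//   Status = InternalSmmStartupThisAp (MyProcedure, 1, MyArg, &Token, 0, &ApStatus);
+//   while (IsApReady ((SPIN_LOCK *)Token) == EFI_NOT_READY) {
+//     CpuPause ();
+//   }
+//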
+
+/**
+ Worker function to execute a caller provided function on all enabled APs.
+
+ @param[in] Procedure A pointer to the function to be run on
+ enabled APs of the system.
+ @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for
+ APs to return from Procedure, either for
+ blocking or non-blocking mode.
+ @param[in,out] ProcedureArguments The parameter passed into Procedure for
+ all APs.
+ @param[in,out] Token This is an optional parameter that allows the caller to execute the
+ procedure in a blocking or non-blocking fashion. If it is NULL the
+ call is blocking, and the call will not return until the AP has
+ completed the procedure. If the token is not NULL, the call will
+ return immediately. The caller can check whether the procedure has
+ completed with CheckOnProcedure or WaitForProcedure.
+ @param[in,out] CPUStatus This optional pointer may be used to get the status code returned
+ by Procedure when it completes execution on the target AP, or with
+ EFI_TIMEOUT if the Procedure fails to complete within the optional
+ timeout. The implementation will update this variable with
+ EFI_NOT_READY prior to starting Procedure on the target AP.
+
+
+ @retval EFI_SUCCESS In blocking mode, all APs have finished before
+ the timeout expired.
+ @retval EFI_SUCCESS In non-blocking mode, function has been dispatched
+ to all enabled APs.
+ @retval others Failed to Startup all APs.
+
+**/
+EFI_STATUS
+InternalSmmStartupAllAPs (
+ IN EFI_AP_PROCEDURE2 Procedure,
+ IN UINTN TimeoutInMicroseconds,
+ IN OUT VOID *ProcedureArguments OPTIONAL,
+ IN OUT MM_COMPLETION *Token,
+ IN OUT EFI_STATUS *CPUStatus
+ )
+{
+ UINTN Index;
+ UINTN CpuCount;
+ PROCEDURE_TOKEN *ProcToken;
+
+ if ((TimeoutInMicroseconds != 0) && ((mSmmMp.Attributes & EFI_MM_MP_TIMEOUT_SUPPORTED) == 0)) {
+ return EFI_INVALID_PARAMETER;
+ }
+ if (Procedure == NULL) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ CpuCount = 0;
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ if (IsPresentAp (Index)) {
+ CpuCount ++;
+
+ if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ if (!AcquireSpinLockOrFail(mSmmMpSyncData->CpuData[Index].Busy)) {
+ return EFI_NOT_READY;
+ }
+ ReleaseSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
+ }
+ }
+ if (CpuCount == 0) {
+ return EFI_NOT_STARTED;
+ }
+
+ if (Token != NULL) {
+ ProcToken = GetFreeToken ((UINT32)mMaxNumberOfCpus);
+ *Token = (MM_COMPLETION)ProcToken->SpinLock;
+ } else {
+ ProcToken = NULL;
+ }
+
+ //
+ // Make sure all BUSY locks are acquired.
+ //
+ // The loop above already checked mSmmMpSyncData->CpuData[Index].Busy for each
+ // present AP, so the blocking AcquireSpinLock() is used here instead of
+ // AcquireSpinLockOrFail().
+ //
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ if (IsPresentAp (Index)) {
+ AcquireSpinLock (mSmmMpSyncData->CpuData[Index].Busy);
+ }
+ }
+
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ if (IsPresentAp (Index)) {
+ mSmmMpSyncData->CpuData[Index].Procedure = (EFI_AP_PROCEDURE2) Procedure;
+ mSmmMpSyncData->CpuData[Index].Parameter = ProcedureArguments;
+ if (ProcToken != NULL) {
+ mSmmMpSyncData->CpuData[Index].Token = ProcToken;
+ }
+ if (CPUStatus != NULL) {
+ mSmmMpSyncData->CpuData[Index].Status = &CPUStatus[Index];
+ if (mSmmMpSyncData->CpuData[Index].Status != NULL) {
+ *mSmmMpSyncData->CpuData[Index].Status = EFI_NOT_READY;
+ }
+ }
+ } else {
+ //
+ // PI spec requirement:
+ // For every excluded processor, the array entry must contain a value of EFI_NOT_STARTED.
+ //
+ if (CPUStatus != NULL) {
+ CPUStatus[Index] = EFI_NOT_STARTED;
+ }
+
+ //
+ // Decrease the count to mark this processor(AP or BSP) as finished.
+ //
+ if (ProcToken != NULL) {
+ WaitForSemaphore (&ProcToken->RunningApCount);
+ }
+ }
+ }
+
+ ReleaseAllAPs ();
+
+ if (Token == NULL) {
+ //
+ // Make sure all APs have completed their tasks.
+ //
+ WaitForAllAPsNotBusy (TRUE);
+ }
+
+ return EFI_SUCCESS;
+}
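+
+//
+// Hypothetical caller sketch (invented names, illustration only): broadcast a
+// procedure to every present AP and collect per-CPU status. The status array
+// must hold mMaxNumberOfCpus entries; excluded CPUs come back as
+// EFI_NOT_STARTED per the PI specification.
+//
+//   MM_COMPLETION  Token;
+//
+//   Status = InternalSmmStartupAllAPs (MyProcedure, 0, MyArg, &Token, CpuStatusArray);
+//   // ... BSP continues with other work ...
+//   while (IsApReady ((SPIN_LOCK *)Token) == EFI_NOT_READY) {
+//     CpuPause ();
+//   }
+//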
+
+/**
+ ISO C99 6.5.2.2 "Function calls", paragraph 9:
+ If the function is defined with a type that is not compatible with
+ the type (of the expression) pointed to by the expression that
+ denotes the called function, the behavior is undefined.
+
+ The wrapper function below converts between EFI_AP_PROCEDURE
+ and EFI_AP_PROCEDURE2.
+
+ Wrapper for Procedures.
+
+ @param[in] Buffer Pointer to PROCEDURE_WRAPPER buffer.
+
+**/
+EFI_STATUS
+EFIAPI
+ProcedureWrapper (
+ IN VOID *Buffer
+ )
+{
+ PROCEDURE_WRAPPER *Wrapper;
+
+ Wrapper = Buffer;
+ Wrapper->Procedure (Wrapper->ProcedureArgument);
+
+ return EFI_SUCCESS;
+}
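+
+//
+// Sketch of the incompatibility the wrapper avoids (illustration only):
+// EFI_AP_PROCEDURE returns VOID while EFI_AP_PROCEDURE2 returns EFI_STATUS,
+// so calling one through a pointer of the other type is undefined behavior:
+//
+//   // WRONG: invoking a VOID (*)(VOID *) through an EFI_STATUS (*)(VOID *)
+//   Procedure2 = (EFI_AP_PROCEDURE2)LegacyProcedure;  // UB when called
+//
+//   // RIGHT: keep the original pointer in a PROCEDURE_WRAPPER and invoke it
+//   // from ProcedureWrapper(), which has the correct EFI_AP_PROCEDURE2 type.
+//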
+
+/**
+ Schedule a procedure to run on the specified CPU in blocking mode.
+
+ @param[in] Procedure The address of the procedure to run
+ @param[in] CpuIndex Target CPU Index
+ @param[in, out] ProcArguments The parameter to pass to the procedure
+
+ @retval EFI_INVALID_PARAMETER CpuIndex not valid
+ @retval EFI_INVALID_PARAMETER CpuIndex specifying BSP
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
+ @retval EFI_SUCCESS The procedure has been successfully scheduled
+
+**/
+EFI_STATUS
+EFIAPI
+SmmBlockingStartupThisAp (
+ IN EFI_AP_PROCEDURE Procedure,
+ IN UINTN CpuIndex,
+ IN OUT VOID *ProcArguments OPTIONAL
+ )
+{
+ PROCEDURE_WRAPPER Wrapper;
+
+ Wrapper.Procedure = Procedure;
+ Wrapper.ProcedureArgument = ProcArguments;
+
+ //
+ // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
+ //
+ return InternalSmmStartupThisAp (ProcedureWrapper, CpuIndex, &Wrapper, NULL, 0, NULL);
+}
+
+/**
+ Schedule a procedure to run on the specified CPU.
+
+ @param Procedure The address of the procedure to run
+ @param CpuIndex Target CPU Index
+ @param ProcArguments The parameter to pass to the procedure
+
+ @retval EFI_INVALID_PARAMETER CpuIndex not valid
+ @retval EFI_INVALID_PARAMETER CpuIndex specifying BSP
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
+ @retval EFI_SUCCESS The procedure has been successfully scheduled
+
+**/
+EFI_STATUS
+EFIAPI
+SmmStartupThisAp (
+ IN EFI_AP_PROCEDURE Procedure,
+ IN UINTN CpuIndex,
+ IN OUT VOID *ProcArguments OPTIONAL
+ )
+{
+ gSmmCpuPrivate->ApWrapperFunc[CpuIndex].Procedure = Procedure;
+ gSmmCpuPrivate->ApWrapperFunc[CpuIndex].ProcedureArgument = ProcArguments;
+
+ //
+ // Use wrapper function to convert EFI_AP_PROCEDURE to EFI_AP_PROCEDURE2.
+ //
+ return InternalSmmStartupThisAp (
+ ProcedureWrapper,
+ CpuIndex,
+ &gSmmCpuPrivate->ApWrapperFunc[CpuIndex],
+ FeaturePcdGet (PcdCpuSmmBlockStartupThisAp) ? NULL : &mSmmStartupThisApToken,
+ 0,
+ NULL
+ );
+}
+
+/**
+ This function sets DR6 & DR7 according to the SMM save state, before running SMM C code.
+ They are useful when you want to enable hardware breakpoints in SMM without entering SMM mode first.
+
+ NOTE: It might not be appropriate at runtime since it might
+ conflict with OS debugging facilities. Turn them off in RELEASE builds.
+
+ @param CpuIndex CPU Index
+
+**/
+VOID
+EFIAPI
+CpuSmmDebugEntry (
+ IN UINTN CpuIndex
+ )
+{
+ SMRAM_SAVE_STATE_MAP *CpuSaveState;
+
+ if (FeaturePcdGet (PcdCpuSmmDebug)) {
+ ASSERT(CpuIndex < mMaxNumberOfCpus);
+ CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
+ if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
+ AsmWriteDr6 (CpuSaveState->x86._DR6);
+ AsmWriteDr7 (CpuSaveState->x86._DR7);
+ } else {
+ AsmWriteDr6 ((UINTN)CpuSaveState->x64._DR6);
+ AsmWriteDr7 ((UINTN)CpuSaveState->x64._DR7);
+ }
+ }
+}
+
+/**
+ This function restores DR6 & DR7 to SMM save state.
+
+ NOTE: It might not be appropriate at runtime since it might
+ conflict with OS debugging facilities. Turn them off in RELEASE builds.
+
+ @param CpuIndex CPU Index
+
+**/
+VOID
+EFIAPI
+CpuSmmDebugExit (
+ IN UINTN CpuIndex
+ )
+{
+ SMRAM_SAVE_STATE_MAP *CpuSaveState;
+
+ if (FeaturePcdGet (PcdCpuSmmDebug)) {
+ ASSERT(CpuIndex < mMaxNumberOfCpus);
+ CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
+ if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
+ CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
+ CpuSaveState->x86._DR6 = (UINT32)AsmReadDr6 ();
+ } else {
+ CpuSaveState->x64._DR7 = AsmReadDr7 ();
+ CpuSaveState->x64._DR6 = AsmReadDr6 ();
+ }
+ }
+}
+
+/**
+ C function for SMI entry, each processor comes here upon SMI trigger.
+
+ @param CpuIndex CPU Index
+
+**/
+VOID
+EFIAPI
+SmiRendezvous (
+ IN UINTN CpuIndex
+ )
+{
+ EFI_STATUS Status;
+ BOOLEAN ValidSmi;
+ BOOLEAN IsBsp;
+ BOOLEAN BspInProgress;
+ UINTN Index;
+ UINTN Cr2;
+
+ ASSERT(CpuIndex < mMaxNumberOfCpus);
+
+ //
+ // Save CR2 because a page fault exception in SMM may override its value,
+ // when on-demand paging is used for memory above 4G.
+ //
+ Cr2 = 0;
+ SaveCr2 (&Cr2);
+
+ //
+ // Call the user-registered startup function first.
+ //
+ if (mSmmMpSyncData->StartupProcedure != NULL) {
+ mSmmMpSyncData->StartupProcedure (mSmmMpSyncData->StartupProcArgs);
+ }
+
+ //
+ // Perform CPU specific entry hooks
+ //
+ SmmCpuFeaturesRendezvousEntry (CpuIndex);
+
+ //
+ // Determine if this is a valid SMI
+ //
+ ValidSmi = PlatformValidSmi();
+
+ //
+ // Determine if the BSP is already in progress. Note this must be checked after
+ // ValidSmi because the BSP may clear a valid SMI source after checking in.
+ //
+ BspInProgress = *mSmmMpSyncData->InsideSmm;
+
+ if (!BspInProgress && !ValidSmi) {
+ //
+ // If we reach here, it means when we sampled the ValidSmi flag, SMI status had not
+ // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI
+ // status had been cleared by BSP and an existing SMI run has almost ended. (Note
+ // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there
+ // is nothing we need to do.
+ //
+ goto Exit;
+ } else {
+ //
+ // Signal presence of this processor
+ //
+ if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
+ //
+ // BSP has already ended the synchronization, so QUIT!!!
+ //
+
+ //
+ // Wait for BSP's signal to finish SMI
+ //
+ while (*mSmmMpSyncData->AllCpusInSync) {
+ CpuPause ();
+ }
+ goto Exit;
+ } else {
+
+ //
+ // The BUSY lock is initialized to Released state.
+ // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call.
+ // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately
+ // after AP's present flag is detected.
+ //
+ InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
+ }
+
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ ActivateSmmProfile (CpuIndex);
+ }
+
+ if (BspInProgress) {
+ //
+ // BSP has been elected. Follow AP path, regardless of ValidSmi flag
+ // as BSP may have cleared the SMI status
+ //
+ APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
+ } else {
+ //
+ // We have a valid SMI
+ //
+
+ //
+ // Elect BSP
+ //
+ IsBsp = FALSE;
+ if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
+ if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) {
+ //
+ // Call platform hook to do BSP election
+ //
+ Status = PlatformSmmBspElection (&IsBsp);
+ if (EFI_SUCCESS == Status) {
+ //
+ // Platform hook determines successfully
+ //
+ if (IsBsp) {
+ mSmmMpSyncData->BspIndex = (UINT32)CpuIndex;
+ }
+ } else {
+ //
+ // Platform hook fails to determine, use default BSP election method
+ //
+ InterlockedCompareExchange32 (
+ (UINT32*)&mSmmMpSyncData->BspIndex,
+ (UINT32)-1,
+ (UINT32)CpuIndex
+ );
+ }
+ }
+ }
+
+ //
+ // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP
+ //
+ if (mSmmMpSyncData->BspIndex == CpuIndex) {
+
+ //
+ // Clear last request for SwitchBsp.
+ //
+ if (mSmmMpSyncData->SwitchBsp) {
+ mSmmMpSyncData->SwitchBsp = FALSE;
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ mSmmMpSyncData->CandidateBsp[Index] = FALSE;
+ }
+ }
+
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ SmmProfileRecordSmiNum ();
+ }
+
+ //
+ // BSP Handler is always called with a ValidSmi == TRUE
+ //
+ BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode);
+ } else {
+ APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode);
+ }
+ }
+
+ ASSERT (*mSmmMpSyncData->CpuData[CpuIndex].Run == 0);
+
+ //
+ // Wait for BSP's signal to exit SMI
+ //
+ while (*mSmmMpSyncData->AllCpusInSync) {
+ CpuPause ();
+ }
+ }
+
+Exit:
+ SmmCpuFeaturesRendezvousExit (CpuIndex);
+
+ //
+ // Restore Cr2
+ //
+ RestoreCr2 (Cr2);
+}
+
+/**
+ Allocate buffer for SpinLock and Wrapper function buffer.
+
+**/
+VOID
+InitializeDataForMmMp (
+ VOID
+ )
+{
+ gSmmCpuPrivate->ApWrapperFunc = AllocatePool (sizeof (PROCEDURE_WRAPPER) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
+ ASSERT (gSmmCpuPrivate->ApWrapperFunc != NULL);
+
+ InitializeListHead (&gSmmCpuPrivate->TokenList);
+
+ gSmmCpuPrivate->FirstFreeToken = AllocateTokenBuffer ();
+}
+
+/**
+ Allocate buffer for all semaphores and spin locks.
+
+**/
+VOID
+InitializeSmmCpuSemaphores (
+ VOID
+ )
+{
+ UINTN ProcessorCount;
+ UINTN TotalSize;
+ UINTN GlobalSemaphoresSize;
+ UINTN CpuSemaphoresSize;
+ UINTN SemaphoreSize;
+ UINTN Pages;
+ UINTN *SemaphoreBlock;
+ UINTN SemaphoreAddr;
+
+ SemaphoreSize = GetSpinLockProperties ();
+ ProcessorCount = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
+ GlobalSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_GLOBAL) / sizeof (VOID *)) * SemaphoreSize;
+ CpuSemaphoresSize = (sizeof (SMM_CPU_SEMAPHORE_CPU) / sizeof (VOID *)) * ProcessorCount * SemaphoreSize;
+ TotalSize = GlobalSemaphoresSize + CpuSemaphoresSize;
+ DEBUG((DEBUG_INFO, "One Semaphore Size = 0x%x\n", SemaphoreSize));
+ DEBUG((DEBUG_INFO, "Total Semaphores Size = 0x%x\n", TotalSize));
+ Pages = EFI_SIZE_TO_PAGES (TotalSize);
+ SemaphoreBlock = AllocatePages (Pages);
+ ASSERT (SemaphoreBlock != NULL);
+ ZeroMem (SemaphoreBlock, TotalSize);
+
+ SemaphoreAddr = (UINTN)SemaphoreBlock;
+ mSmmCpuSemaphores.SemaphoreGlobal.Counter = (UINT32 *)SemaphoreAddr;
+ SemaphoreAddr += SemaphoreSize;
+ mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm = (BOOLEAN *)SemaphoreAddr;
+ SemaphoreAddr += SemaphoreSize;
+ mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync = (BOOLEAN *)SemaphoreAddr;
+ SemaphoreAddr += SemaphoreSize;
+ mSmmCpuSemaphores.SemaphoreGlobal.PFLock = (SPIN_LOCK *)SemaphoreAddr;
+ SemaphoreAddr += SemaphoreSize;
+ mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
+ = (SPIN_LOCK *)SemaphoreAddr;
+ SemaphoreAddr += SemaphoreSize;
+
+ SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
+ mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;
+ SemaphoreAddr += ProcessorCount * SemaphoreSize;
+ mSmmCpuSemaphores.SemaphoreCpu.Run = (UINT32 *)SemaphoreAddr;
+ SemaphoreAddr += ProcessorCount * SemaphoreSize;
+ mSmmCpuSemaphores.SemaphoreCpu.Present = (BOOLEAN *)SemaphoreAddr;
+
+ mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
+ mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
+
+ mSemaphoreSize = SemaphoreSize;
+}
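+
+//
+// Each semaphore above occupies its own SemaphoreSize slot; the value from
+// GetSpinLockProperties() is typically one cache line, so CPUs spinning on
+// different semaphores never contend for the same line (no false sharing).
+// Worked example, assuming a hypothetical 64-byte spin lock size and 4 CPUs:
+//
+//   GlobalSemaphoresSize = 5 global fields  * 64      = 320 bytes
+//   CpuSemaphoresSize    = 3 per-CPU fields * 4 * 64  = 768 bytes
+//   TotalSize            = 320 + 768 = 1088 bytes     -> 1 page allocated
+//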
+
+/**
+ Initialize uncacheable data.
+
+**/
+VOID
+EFIAPI
+InitializeMpSyncData (
+ VOID
+ )
+{
+ UINTN CpuIndex;
+
+ if (mSmmMpSyncData != NULL) {
+ //
+ // mSmmMpSyncDataSize includes one structure of SMM_DISPATCHER_MP_SYNC_DATA, one
+ // CpuData array of SMM_CPU_DATA_BLOCK and one CandidateBsp array of BOOLEAN.
+ //
+ ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize);
+ mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA));
+ mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
+ if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) {
+ //
+ // Enable BSP election by setting BspIndex to -1
+ //
+ mSmmMpSyncData->BspIndex = (UINT32)-1;
+ }
+ mSmmMpSyncData->EffectiveSyncMode = mCpuSmmSyncMode;
+
+ mSmmMpSyncData->Counter = mSmmCpuSemaphores.SemaphoreGlobal.Counter;
+ mSmmMpSyncData->InsideSmm = mSmmCpuSemaphores.SemaphoreGlobal.InsideSmm;
+ mSmmMpSyncData->AllCpusInSync = mSmmCpuSemaphores.SemaphoreGlobal.AllCpusInSync;
+ ASSERT (mSmmMpSyncData->Counter != NULL && mSmmMpSyncData->InsideSmm != NULL &&
+ mSmmMpSyncData->AllCpusInSync != NULL);
+ *mSmmMpSyncData->Counter = 0;
+ *mSmmMpSyncData->InsideSmm = FALSE;
+ *mSmmMpSyncData->AllCpusInSync = FALSE;
+
+ for (CpuIndex = 0; CpuIndex < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; CpuIndex ++) {
+ mSmmMpSyncData->CpuData[CpuIndex].Busy =
+ (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Busy + mSemaphoreSize * CpuIndex);
+ mSmmMpSyncData->CpuData[CpuIndex].Run =
+ (UINT32 *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Run + mSemaphoreSize * CpuIndex);
+ mSmmMpSyncData->CpuData[CpuIndex].Present =
+ (BOOLEAN *)((UINTN)mSmmCpuSemaphores.SemaphoreCpu.Present + mSemaphoreSize * CpuIndex);
+ *(mSmmMpSyncData->CpuData[CpuIndex].Busy) = 0;
+ *(mSmmMpSyncData->CpuData[CpuIndex].Run) = 0;
+ *(mSmmMpSyncData->CpuData[CpuIndex].Present) = FALSE;
+ }
+ }
+}
+
+/**
+ Initialize global data for MP synchronization.
+
+ @param Stacks Base address of SMI stack buffer for all processors.
+ @param StackSize Stack size for each processor in SMM.
+ @param ShadowStackSize Shadow Stack size for each processor in SMM.
+
+ @return The CR3 value (SMM page table base address).
+
+**/
+UINT32
+InitializeMpServiceData (
+ IN VOID *Stacks,
+ IN UINTN StackSize,
+ IN UINTN ShadowStackSize
+ )
+{
+ UINT32 Cr3;
+ UINTN Index;
+ UINT8 *GdtTssTables;
+ UINTN GdtTableStepSize;
+ CPUID_VERSION_INFO_EDX RegEdx;
+ UINT32 MaxExtendedFunction;
+ CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize;
+
+ //
+ // Determine if this CPU supports machine check
+ //
+ AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx.Uint32);
+ mMachineCheckSupported = (BOOLEAN)(RegEdx.Bits.MCA == 1);
+
+ //
+ // Allocate memory for all locks and semaphores
+ //
+ InitializeSmmCpuSemaphores ();
+
+ //
+ // Initialize mSmmMpSyncData
+ //
+ mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) +
+ (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
+ mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize));
+ ASSERT (mSmmMpSyncData != NULL);
+ mCpuSmmSyncMode = (SMM_CPU_SYNC_MODE)PcdGet8 (PcdCpuSmmSyncMode);
+ InitializeMpSyncData ();
+
+ //
+ // Initialize physical address mask
+ // NOTE: Physical memory above virtual address limit is not supported !!!
+ //
+ AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunction, NULL, NULL, NULL);
+ if (MaxExtendedFunction >= CPUID_VIR_PHY_ADDRESS_SIZE) {
+ AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
+ } else {
+ VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
+ }
+ gPhyMask = LShiftU64 (1, VirPhyAddressSize.Bits.PhysicalAddressBits) - 1;
+ //
+ // Clear the low 12 bits
+ //
+ gPhyMask &= 0xfffffffffffff000ULL;
+
+ //
+ // Create page tables
+ //
+ Cr3 = SmmInitPageTable ();
+
+ GdtTssTables = InitGdt (Cr3, &GdtTableStepSize);
+
+ //
+ // Install SMI handler for each CPU
+ //
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ InstallSmiHandler (
+ Index,
+ (UINT32)mCpuHotPlugData.SmBase[Index],
+ (VOID*)((UINTN)Stacks + (StackSize + ShadowStackSize) * Index),
+ StackSize,
+ (UINTN)(GdtTssTables + GdtTableStepSize * Index),
+ gcSmiGdtr.Limit + 1,
+ gcSmiIdtr.Base,
+ gcSmiIdtr.Limit + 1,
+ Cr3
+ );
+ }
+
+ //
+ // Record current MTRR settings
+ //
+ ZeroMem (&gSmiMtrrs, sizeof (gSmiMtrrs));
+ MtrrGetAllMtrrs (&gSmiMtrrs);
+
+ return Cr3;
+}
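+
+//
+// Example of the physical address mask computed above, assuming the 36-bit
+// fallback used when CPUID leaf CPUID_VIR_PHY_ADDRESS_SIZE is unavailable:
+//
+//   gPhyMask  = (1 << 36) - 1   = 0x0000000FFFFFFFFF
+//   gPhyMask &= ~0xFFFULL       = 0x0000000FFFFFF000
+//
+// i.e. only the page-frame bits of a physical address survive the mask.
+//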
+
+/**
+
+ Register the SMM Foundation entry point.
+
+ @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
+ @param SmmEntryPoint SMM Foundation EntryPoint
+
+ @retval EFI_SUCCESS The SMM Foundation entry point was registered successfully
+
+**/
+EFI_STATUS
+EFIAPI
+RegisterSmmEntry (
+ IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,
+ IN EFI_SMM_ENTRY_POINT SmmEntryPoint
+ )
+{
+ //
+ // Record SMM Foundation EntryPoint, later invoke it on SMI entry vector.
+ //
+ gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint;
+ return EFI_SUCCESS;
+}
+
+/**
+
+ Register a startup procedure to be run by each processor upon SMI entry.
+
+ @param[in] Procedure A pointer to the code stream to be run on the designated target AP
+ of the system. Type EFI_AP_PROCEDURE is defined in the PI
+ Specification, Volume 2, with the related definitions of
+ EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
+ The caller may pass a value of NULL to deregister any existing
+ startup procedure.
+ @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code that is
+ run by the AP. It is an optional common mailbox between APs and
+ the caller to share information
+
+ @retval EFI_SUCCESS The Procedure has been set successfully.
+ @retval EFI_INVALID_PARAMETER The Procedure is NULL but ProcedureArguments is not NULL.
+
+**/
+EFI_STATUS
+RegisterStartupProcedure (
+ IN EFI_AP_PROCEDURE Procedure,
+ IN OUT VOID *ProcedureArguments OPTIONAL
+ )
+{
+ if (Procedure == NULL && ProcedureArguments != NULL) {
+ return EFI_INVALID_PARAMETER;
+ }
+ if (mSmmMpSyncData == NULL) {
+ return EFI_NOT_READY;
+ }
+
+ mSmmMpSyncData->StartupProcedure = Procedure;
+ mSmmMpSyncData->StartupProcArgs = ProcedureArguments;
+
+ return EFI_SUCCESS;
+}
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
new file mode 100644
index 00000000..028f4362
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
@@ -0,0 +1,1470 @@
+/** @file
+Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
+
+Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
+
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+//
+// SMM CPU Private Data structure that contains SMM Configuration Protocol
+// along with its supporting fields.
+//
+SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = {
+ SMM_CPU_PRIVATE_DATA_SIGNATURE, // Signature
+ NULL, // SmmCpuHandle
+ NULL, // Pointer to ProcessorInfo array
+ NULL, // Pointer to Operation array
+ NULL, // Pointer to CpuSaveStateSize array
+ NULL, // Pointer to CpuSaveState array
+ { {0} }, // SmmReservedSmramRegion
+ {
+ SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp
+ 0, // SmmCoreEntryContext.CurrentlyExecutingCpu
+ 0, // SmmCoreEntryContext.NumberOfCpus
+ NULL, // SmmCoreEntryContext.CpuSaveStateSize
+ NULL // SmmCoreEntryContext.CpuSaveState
+ },
+ NULL, // SmmCoreEntry
+ {
+ mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions
+ RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry
+ },
+ NULL, // pointer to Ap Wrapper Func array
+ {NULL, NULL}, // LIST_ENTRY for Tokens.
+};
+
+CPU_HOT_PLUG_DATA mCpuHotPlugData = {
+ CPU_HOT_PLUG_DATA_REVISION_1, // Revision
+ 0, // Array Length of SmBase and APIC ID
+ NULL, // Pointer to APIC ID array
+ NULL, // Pointer to SMBASE array
+ 0, // Reserved
+ 0, // SmrrBase
+ 0 // SmrrSize
+};
+
+//
+// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
+//
+SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;
+
+//
+// SMM Relocation variables
+//
+volatile BOOLEAN *mRebased;
+volatile BOOLEAN mIsBsp;
+
+///
+/// Handle for the SMM CPU Protocol
+///
+EFI_HANDLE mSmmCpuHandle = NULL;
+
+///
+/// SMM CPU Protocol instance
+///
+EFI_SMM_CPU_PROTOCOL mSmmCpu = {
+ SmmReadSaveState,
+ SmmWriteSaveState
+};
+
+///
+/// SMM Memory Attribute Protocol instance
+///
+EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL mSmmMemoryAttribute = {
+ EdkiiSmmGetMemoryAttributes,
+ EdkiiSmmSetMemoryAttributes,
+ EdkiiSmmClearMemoryAttributes
+};
+
+EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];
+
+//
+// SMM stack information
+//
+UINTN mSmmStackArrayBase;
+UINTN mSmmStackArrayEnd;
+UINTN mSmmStackSize;
+
+UINTN mSmmShadowStackSize;
+BOOLEAN mCetSupported = TRUE;
+
+UINTN mMaxNumberOfCpus = 1;
+UINTN mNumberOfCpus = 1;
+
+//
+// SMM ready to lock flag
+//
+BOOLEAN mSmmReadyToLock = FALSE;
+
+//
+// Global used to cache PCD for SMM Code Access Check enable
+//
+BOOLEAN mSmmCodeAccessCheckEnable = FALSE;
+
+//
+// Global copy of the PcdPteMemoryEncryptionAddressOrMask
+//
+UINT64 mAddressEncMask = 0;
+
+//
+// Spin lock used to serialize setting of SMM Code Access Check feature
+//
+SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;
+
+//
+// Saved SMM ranges information
+//
+EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;
+UINTN mSmmCpuSmramRangeCount;
+
+UINT8 mPhysicalAddressBits;
+
+//
+// Control register contents saved for SMM S3 resume state initialization.
+//
+UINT32 mSmmCr0;
+UINT32 mSmmCr4;
+
+/**
+ Initialize IDT to setup exception handlers for SMM.
+
+**/
+VOID
+InitializeSmmIdt (
+ VOID
+ )
+{
+ EFI_STATUS Status;
+ BOOLEAN InterruptState;
+ IA32_DESCRIPTOR DxeIdtr;
+
+ //
+ // There are 32 (not 255) entries in it since only processor-generated
+ // exceptions will be handled.
+ //
+ gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
+ //
+ // Allocate page aligned IDT, because it might be set as read only.
+ //
+ gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));
+ ASSERT (gcSmiIdtr.Base != 0);
+ ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);
+
+ //
+ // Disable Interrupt and save DXE IDT table
+ //
+ InterruptState = SaveAndDisableInterrupts ();
+ AsmReadIdtr (&DxeIdtr);
+ //
+ // Load SMM temporary IDT table
+ //
+ AsmWriteIdtr (&gcSmiIdtr);
+ //
+ // Setup SMM default exception handlers, SMM IDT table
+ // will be updated and saved in gcSmiIdtr
+ //
+ Status = InitializeCpuExceptionHandlers (NULL);
+ ASSERT_EFI_ERROR (Status);
+ //
+ // Restore DXE IDT table and CPU interrupt
+ //
+ AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr);
+ SetInterruptState (InterruptState);
+}
+
+/**
+ Search module name by input IP address and output it.
+
+ @param CallerIpAddress Caller instruction pointer.
+
+**/
+VOID
+DumpModuleInfoByIp (
+ IN UINTN CallerIpAddress
+ )
+{
+ UINTN Pe32Data;
+ VOID *PdbPointer;
+
+ //
+ // Find Image Base
+ //
+ Pe32Data = PeCoffSearchImageBase (CallerIpAddress);
+ if (Pe32Data != 0) {
+ DEBUG ((DEBUG_ERROR, "It is invoked from the instruction before IP(0x%p)", (VOID *) CallerIpAddress));
+ PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data);
+ if (PdbPointer != NULL) {
+ DEBUG ((DEBUG_ERROR, " in module (%a)\n", PdbPointer));
+ }
+ }
+}
+
+/**
+ Read information from the CPU save state.
+
+ @param This EFI_SMM_CPU_PROTOCOL instance
+ @param Width The number of bytes to read from the CPU save state.
+ @param Register Specifies the CPU register to read from the save state.
+ @param CpuIndex Specifies the zero-based index of the CPU save state.
+ @param Buffer Upon return, this holds the CPU register value read from the save state.
+
+ @retval EFI_SUCCESS The register was read from Save State
+ @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
+ @retval EFI_INVALID_PARAMETER This or Buffer is NULL.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmReadSaveState (
+ IN CONST EFI_SMM_CPU_PROTOCOL *This,
+ IN UINTN Width,
+ IN EFI_SMM_SAVE_STATE_REGISTER Register,
+ IN UINTN CpuIndex,
+ OUT VOID *Buffer
+ )
+{
+ EFI_STATUS Status;
+
+ //
+ // Retrieve pointer to the specified CPU's SMM Save State buffer
+ //
+ if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
+ return EFI_INVALID_PARAMETER;
+ }
+ //
+ // The SpeculationBarrier() call here is to ensure the above check for
+ // CpuIndex has been completed before the execution of any subsequent code.
+ //
+ SpeculationBarrier ();
+
+ //
+ // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID
+ //
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
+ //
+ // The pseudo-register only supports the 64-bit size specified by Width.
+ //
+ if (Width != sizeof (UINT64)) {
+ return EFI_INVALID_PARAMETER;
+ }
+ //
+ // If the processor is in SMM at the time the SMI occurred,
+ // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
+ // Otherwise, EFI_NOT_FOUND is returned.
+ //
+ if (*(mSmmMpSyncData->CpuData[CpuIndex].Present)) {
+ *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId;
+ return EFI_SUCCESS;
+ } else {
+ return EFI_NOT_FOUND;
+ }
+ }
+
+ if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
+ if (Status == EFI_UNSUPPORTED) {
+ Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer);
+ }
+ return Status;
+}
+
+/**
+ Write data to the CPU save state.
+
+ @param This EFI_SMM_CPU_PROTOCOL instance
+ @param Width The number of bytes to write to the CPU save state.
+ @param Register Specifies the CPU register to write to the save state.
+ @param CpuIndex Specifies the zero-based index of the CPU save state
+ @param Buffer Upon entry, this holds the new CPU register value.
+
+ @retval EFI_SUCCESS The register was written to the save state
+ @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
+ @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct
+
+**/
+EFI_STATUS
+EFIAPI
+SmmWriteSaveState (
+ IN CONST EFI_SMM_CPU_PROTOCOL *This,
+ IN UINTN Width,
+ IN EFI_SMM_SAVE_STATE_REGISTER Register,
+ IN UINTN CpuIndex,
+ IN CONST VOID *Buffer
+ )
+{
+ EFI_STATUS Status;
+
+ //
+ // Retrieve pointer to the specified CPU's SMM Save State buffer
+ //
+ if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ //
+ // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored
+ //
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) {
+ return EFI_SUCCESS;
+ }
+
+ if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
+ if (Status == EFI_UNSUPPORTED) {
+ Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer);
+ }
+ return Status;
+}
+
+
+/**
+ C function for the SMI handler, used to change every processor's SMBASE register.
+
+**/
+VOID
+EFIAPI
+SmmInitHandler (
+ VOID
+ )
+{
+ UINT32 ApicId;
+ UINTN Index;
+
+ //
+ // Update SMM IDT entries' code segment and load IDT
+ //
+ AsmWriteIdtr (&gcSmiIdtr);
+ ApicId = GetApicId ();
+
+ ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);
+
+ for (Index = 0; Index < mNumberOfCpus; Index++) {
+ if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
+ //
+ // Initialize SMM specific features on the currently executing CPU
+ //
+ SmmCpuFeaturesInitializeProcessor (
+ Index,
+ mIsBsp,
+ gSmmCpuPrivate->ProcessorInfo,
+ &mCpuHotPlugData
+ );
+
+ if (!mSmmS3Flag) {
+ //
+ // Check XD and BTS features on each processor on normal boot
+ //
+ CheckFeatureSupported ();
+ }
+
+ if (mIsBsp) {
+ //
+ // BSP rebase is already done above.
+ // Initialize private data during S3 resume
+ //
+ InitializeMpSyncData ();
+ }
+
+ //
+ // Hook return after RSM to set SMM re-based flag
+ //
+ SemaphoreHook (Index, &mRebased[Index]);
+
+ return;
+ }
+ }
+ ASSERT (FALSE);
+}
+
+/**
+ Relocate SmmBases for each processor.
+
+ Execute on first boot and all S3 resumes
+
+**/
+VOID
+EFIAPI
+SmmRelocateBases (
+ VOID
+ )
+{
+ UINT8 BakBuf[BACK_BUF_SIZE];
+ SMRAM_SAVE_STATE_MAP BakBuf2;
+ SMRAM_SAVE_STATE_MAP *CpuStatePtr;
+ UINT8 *U8Ptr;
+ UINT32 ApicId;
+ UINTN Index;
+ UINTN BspIndex;
+
+ //
+ // Make sure the reserved size is large enough for procedure SmmInitTemplate.
+ //
+ ASSERT (sizeof (BakBuf) >= gcSmmInitSize);
+
+ //
+ // Patch ASM code template with current CR0, CR3, and CR4 values
+ //
+ mSmmCr0 = (UINT32)AsmReadCr0 ();
+ PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);
+ PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);
+ mSmmCr4 = (UINT32)AsmReadCr4 ();
+ PatchInstructionX86 (gPatchSmmCr4, mSmmCr4 & (~CR4_CET_ENABLE), 4);
+
+ //
+ // Patch GDTR for SMM base relocation
+ //
+ gcSmiInitGdtr.Base = gcSmiGdtr.Base;
+ gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;
+
+ U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
+ CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);
+
+ //
+ // Backup original contents at address 0x38000
+ //
+ CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
+ CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));
+
+ //
+ // Load image for relocation
+ //
+ CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);
+
+ //
+ // Retrieve the local APIC ID of current processor
+ //
+ ApicId = GetApicId ();
+
+ //
+ // Relocate SM bases for all APs
+ // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
+ //
+ mIsBsp = FALSE;
+ BspIndex = (UINTN)-1;
+ for (Index = 0; Index < mNumberOfCpus; Index++) {
+ mRebased[Index] = FALSE;
+ if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
+ SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
+ //
+ // Wait for this AP to finish its 1st SMI
+ //
+ while (!mRebased[Index]);
+ } else {
+ //
+ // BSP will be Relocated later
+ //
+ BspIndex = Index;
+ }
+ }
+
+ //
+ // Relocate BSP's SMM base
+ //
+ ASSERT (BspIndex != (UINTN)-1);
+ mIsBsp = TRUE;
+ SendSmiIpi (ApicId);
+ //
+ // Wait for the BSP to finish its 1st SMI
+ //
+ while (!mRebased[BspIndex]);
+
+ //
+ // Restore contents at address 0x38000
+ //
+ CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
+ CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
+}
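+
+//
+// Numeric sketch of the relocation window above (illustrative; values taken
+// from SmramSaveStateMap.h, assuming the architectural default SMBASE of
+// 0x30000): the SmmInit template is copied to
+// SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET = 0x30000 + 0x8000 = 0x38000, and
+// the save state map sits at 0x30000 + 0xFC00 = 0x3FC00, which is why both
+// regions are saved to BakBuf/BakBuf2 and restored afterwards.
+//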
+
+/**
+ SMM Ready To Lock event notification handler.
+
+  The CPU S3 data is copied to SMRAM for security, and mSmmReadyToLock is set so
+  that the additional lock actions that must run in SMM are performed on the next SMI.
+
+ @param[in] Protocol Points to the protocol's unique identifier.
+ @param[in] Interface Points to the interface instance.
+ @param[in] Handle The handle on which the interface was installed.
+
+ @retval EFI_SUCCESS Notification handler runs successfully.
+**/
+EFI_STATUS
+EFIAPI
+SmmReadyToLockEventNotify (
+ IN CONST EFI_GUID *Protocol,
+ IN VOID *Interface,
+ IN EFI_HANDLE Handle
+ )
+{
+ GetAcpiCpuData ();
+
+ //
+ // Cache a copy of UEFI memory map before we start profiling feature.
+ //
+ GetUefiMemoryMap ();
+
+ //
+ // Set SMM ready to lock flag and return
+ //
+ mSmmReadyToLock = TRUE;
+ return EFI_SUCCESS;
+}
+
+/**
+ The module Entry Point of the CPU SMM driver.
+
+ @param ImageHandle The firmware allocated handle for the EFI image.
+ @param SystemTable A pointer to the EFI System Table.
+
+ @retval EFI_SUCCESS The entry point is executed successfully.
+ @retval Other Some error occurs when executing this entry point.
+
+**/
+EFI_STATUS
+EFIAPI
+PiCpuSmmEntry (
+ IN EFI_HANDLE ImageHandle,
+ IN EFI_SYSTEM_TABLE *SystemTable
+ )
+{
+ EFI_STATUS Status;
+ EFI_MP_SERVICES_PROTOCOL *MpServices;
+ UINTN NumberOfEnabledProcessors;
+ UINTN Index;
+ VOID *Buffer;
+ UINTN BufferPages;
+ UINTN TileCodeSize;
+ UINTN TileDataSize;
+ UINTN TileSize;
+ UINT8 *Stacks;
+ VOID *Registration;
+ UINT32 RegEax;
+ UINT32 RegEbx;
+ UINT32 RegEcx;
+ UINT32 RegEdx;
+ UINTN FamilyId;
+ UINTN ModelId;
+ UINT32 Cr3;
+
+ //
+ // Initialize address fixup
+ //
+ PiSmmCpuSmmInitFixupAddress ();
+ PiSmmCpuSmiEntryFixupAddress ();
+
+ //
+ // Initialize Debug Agent to support source level debug in SMM code
+ //
+ InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL);
+
+ //
+ // Report the start of CPU SMM initialization.
+ //
+ REPORT_STATUS_CODE (
+ EFI_PROGRESS_CODE,
+ EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT
+ );
+
+ //
+ // Find out SMRR Base and SMRR Size
+ //
+ FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize);
+
+ //
+ // Get MP Services Protocol
+ //
+ Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Use MP Services Protocol to retrieve the number of processors and number of enabled processors
+ //
+ Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors);
+ ASSERT_EFI_ERROR (Status);
+ ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber));
+
+ //
+  // If CPU hot plug is supported, PcdCpuSmmEnableBspElection must be set to TRUE,
+  // because a fixed BSP index is meaningless when that CPU can be hot removed.
+ //
+ DEBUG_CODE (
+ if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
+      ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection));
+ }
+ );
+
+ //
+ // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable.
+ //
+ mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable);
+ DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable));
+
+ //
+ // Save the PcdPteMemoryEncryptionAddressOrMask value into a global variable.
+  // Make sure mAddressEncMask is confined to the smallest supported address field.
+ //
+ mAddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & PAGING_1G_ADDRESS_MASK_64;
+ DEBUG ((EFI_D_INFO, "mAddressEncMask = 0x%lx\n", mAddressEncMask));
+
+ //
+  // If CPU hot plug is supported, allocate resources for processors that may be hot added.
+ //
+ if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
+ mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber);
+ } else {
+ mMaxNumberOfCpus = mNumberOfCpus;
+ }
+ gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus;
+
+ //
+  // The CPU save state and code for the SMI entry point are tiled within an SMRAM
+  // allocated buffer. The minimum size of this buffer for a uniprocessor system
+  // is 32 KB, because the entry point is SMBASE + 32KB, and the CPU save state
+  // area is just below SMBASE + 64KB. If more than one CPU is present in the
+  // platform, then the SMI entry point and the CPU save state areas can be tiled
+  // to minimize the total amount of SMRAM required for all the CPUs. The tile
+  // size can be computed by adding the CPU save state size, any extra CPU
+  // specific context, and the size of code that must be placed at the SMI entry
+  // point to transfer control to a C function in the native SMM execution mode.
+  // This size is rounded up to the nearest power of 2 to give the tile size for
+  // each CPU. The total amount of memory required is the maximum number of CPUs
+  // that the platform supports times the tile size. The picture below shows the
+  // tiling, where m is the number of tiles that fit in 32KB.
+ //
+ // +-----------------------------+ <-- 2^n offset from Base of allocated buffer
+ // | CPU m+1 Save State |
+ // +-----------------------------+
+ // | CPU m+1 Extra Data |
+ // +-----------------------------+
+ // | Padding |
+ // +-----------------------------+
+ // | CPU 2m SMI Entry |
+ // +#############################+ <-- Base of allocated buffer + 64 KB
+ // | CPU m-1 Save State |
+ // +-----------------------------+
+ // | CPU m-1 Extra Data |
+ // +-----------------------------+
+ // | Padding |
+ // +-----------------------------+
+ // | CPU 2m-1 SMI Entry |
+ // +=============================+ <-- 2^n offset from Base of allocated buffer
+ // | . . . . . . . . . . . . |
+ // +=============================+ <-- 2^n offset from Base of allocated buffer
+ // | CPU 2 Save State |
+ // +-----------------------------+
+ // | CPU 2 Extra Data |
+ // +-----------------------------+
+ // | Padding |
+ // +-----------------------------+
+ // | CPU m+1 SMI Entry |
+ // +=============================+ <-- Base of allocated buffer + 32 KB
+ // | CPU 1 Save State |
+ // +-----------------------------+
+ // | CPU 1 Extra Data |
+ // +-----------------------------+
+ // | Padding |
+ // +-----------------------------+
+ // | CPU m SMI Entry |
+ // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB
+ // | CPU 0 Save State |
+ // +-----------------------------+
+ // | CPU 0 Extra Data |
+ // +-----------------------------+
+ // | Padding |
+ // +-----------------------------+
+ // | CPU m-1 SMI Entry |
+ // +=============================+ <-- 2^n offset from Base of allocated buffer
+ // | . . . . . . . . . . . . |
+ // +=============================+ <-- 2^n offset from Base of allocated buffer
+ // | Padding |
+ // +-----------------------------+
+ // | CPU 1 SMI Entry |
+ // +=============================+ <-- 2^n offset from Base of allocated buffer
+ // | Padding |
+ // +-----------------------------+
+ // | CPU 0 SMI Entry |
+ // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB
+ //
+
+ //
+ // Retrieve CPU Family
+ //
+ AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, NULL);
+ FamilyId = (RegEax >> 8) & 0xf;
+ ModelId = (RegEax >> 4) & 0xf;
+ if (FamilyId == 0x06 || FamilyId == 0x0f) {
+ ModelId = ModelId | ((RegEax >> 12) & 0xf0);
+ }
+
+ RegEdx = 0;
+ AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
+ if (RegEax >= CPUID_EXTENDED_CPU_SIG) {
+ AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
+ }
+ //
+ // Determine the mode of the CPU at the time an SMI occurs
+ // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
+ // Volume 3C, Section 34.4.1.1
+ //
+ mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT;
+ if ((RegEdx & BIT29) != 0) {
+ mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
+ }
+ if (FamilyId == 0x06) {
+ if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) {
+ mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT;
+ }
+ }
+
+ DEBUG ((DEBUG_INFO, "PcdControlFlowEnforcementPropertyMask = %d\n", PcdGet32 (PcdControlFlowEnforcementPropertyMask)));
+ if (PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) {
+ AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
+ if (RegEax > CPUID_EXTENDED_FUNCTION) {
+ AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, &RegEdx);
+ DEBUG ((DEBUG_INFO, "CPUID[7/0] ECX - 0x%08x\n", RegEcx));
+ DEBUG ((DEBUG_INFO, " CET_SS - 0x%08x\n", RegEcx & CPUID_CET_SS));
+ DEBUG ((DEBUG_INFO, " CET_IBT - 0x%08x\n", RegEdx & CPUID_CET_IBT));
+ if ((RegEcx & CPUID_CET_SS) == 0) {
+ mCetSupported = FALSE;
+ PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
+ }
+ if (mCetSupported) {
+ AsmCpuidEx (CPUID_EXTENDED_STATE, CPUID_EXTENDED_STATE_SUB_LEAF, NULL, &RegEbx, &RegEcx, NULL);
+ DEBUG ((DEBUG_INFO, "CPUID[D/1] EBX - 0x%08x, ECX - 0x%08x\n", RegEbx, RegEcx));
+ AsmCpuidEx (CPUID_EXTENDED_STATE, 11, &RegEax, NULL, &RegEcx, NULL);
+ DEBUG ((DEBUG_INFO, "CPUID[D/11] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
+ AsmCpuidEx(CPUID_EXTENDED_STATE, 12, &RegEax, NULL, &RegEcx, NULL);
+ DEBUG ((DEBUG_INFO, "CPUID[D/12] EAX - 0x%08x, ECX - 0x%08x\n", RegEax, RegEcx));
+ }
+ }
+ } else {
+ mCetSupported = FALSE;
+ PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
+ }
+
+ //
+  // Compute the tile size of the buffer required to hold the CPU SMRAM Save State Map,
+  // the extra CPU specific context (which starts at SMBASE + SMM_PSD_OFFSET), and the
+  // SMI entry point. This size is rounded up to the nearest power of 2.
+ //
+ TileCodeSize = GetSmiHandlerSize ();
+ TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);
+ TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
+ TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);
+ TileSize = TileDataSize + TileCodeSize - 1;
+ TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
+ DEBUG ((EFI_D_INFO, "SMRAM TileSize = 0x%08x (0x%08x, 0x%08x)\n", TileSize, TileCodeSize, TileDataSize));
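+  //
+  // Worked example (illustrative figures): if TileCodeSize and TileDataSize
+  // each round to 0x1000, TileSize starts as 0x1FFF and
+  // 2 * GetPowerOfTwo32 (0x1FFF) = 2 * 0x1000 = 0x2000, the smallest power of
+  // two that holds both code and data.
+  //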
+
+ //
+ // If the TileSize is larger than space available for the SMI Handler of
+ // CPU[i], the extra CPU specific context of CPU[i+1], and the SMRAM Save
+ // State Map of CPU[i+1], then ASSERT(). If this ASSERT() is triggered, then
+ // the SMI Handler size must be reduced or the size of the extra CPU specific
+ // context must be reduced.
+ //
+ ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));
+
+ //
+ // Allocate buffer for all of the tiles.
+ //
+ // Intel(R) 64 and IA-32 Architectures Software Developer's Manual
+ // Volume 3C, Section 34.11 SMBASE Relocation
+ // For Pentium and Intel486 processors, the SMBASE values must be
+ // aligned on a 32-KByte boundary or the processor will enter shutdown
+ // state during the execution of a RSM instruction.
+ //
+ // Intel486 processors: FamilyId is 4
+ // Pentium processors : FamilyId is 5
+ //
+ BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
+ if ((FamilyId == 4) || (FamilyId == 5)) {
+ Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);
+ } else {
+ Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
+ }
+ ASSERT (Buffer != NULL);
+ DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));
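+  //
+  // Sizing sketch (illustrative figures, continuing the example above): with
+  // 4 CPUs and TileSize = 0x2000, the buffer spans
+  // SIZE_32KB + 3 * 0x2000 = 0xE000 bytes, i.e. BufferPages = 14.
+  //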
+
+ //
+ // Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
+ //
+ gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus);
+ ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL);
+
+ gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus);
+ ASSERT (gSmmCpuPrivate->Operation != NULL);
+
+ gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
+ ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL);
+
+ gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus);
+ ASSERT (gSmmCpuPrivate->CpuSaveState != NULL);
+
+ mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize;
+ mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState;
+
+ //
+ // Allocate buffer for pointers to array in CPU_HOT_PLUG_DATA.
+ //
+ mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus);
+ ASSERT (mCpuHotPlugData.ApicId != NULL);
+ mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
+ ASSERT (mCpuHotPlugData.SmBase != NULL);
+ mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus;
+
+ //
+ // Retrieve APIC ID of each enabled processor from the MP Services protocol.
+ // Also compute the SMBASE address, CPU Save State address, and CPU Save state
+ // size for each CPU in the platform
+ //
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;
+ gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP);
+ gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
+ gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
+
+ if (Index < mNumberOfCpus) {
+ Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]);
+ ASSERT_EFI_ERROR (Status);
+ mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId;
+
+ DEBUG ((EFI_D_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n",
+ Index,
+ (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId,
+ mCpuHotPlugData.SmBase[Index],
+ gSmmCpuPrivate->CpuSaveState[Index],
+ gSmmCpuPrivate->CpuSaveStateSize[Index]
+ ));
+ } else {
+ gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID;
+ mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID;
+ }
+ }
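+
+  //
+  // SMBASE sketch (illustrative): CPU 0 gets SmBase = Buffer - SMM_HANDLER_OFFSET,
+  // so its SMI entry (SmBase + 0x8000) lands at the base of the buffer; with
+  // TileSize = 0x2000, CPU 1's entry lands exactly one tile higher at Buffer + 0x2000.
+  //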
+
+ //
+ // Allocate SMI stacks for all processors.
+ //
+ mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)));
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ //
+    // Two more pages are allocated for each processor:
+    // one is the guard page and the other is the known-good stack.
+ //
+ // +-------------------------------------------+-----+-------------------------------------------+
+ // | Known Good Stack | Guard Page | SMM Stack | ... | Known Good Stack | Guard Page | SMM Stack |
+ // +-------------------------------------------+-----+-------------------------------------------+
+ // | | | |
+ // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->|
+ //
+ mSmmStackSize += EFI_PAGES_TO_SIZE (2);
+ }
+
+ mSmmShadowStackSize = 0;
+ if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
+ //
+ // Append Shadow Stack after normal stack
+ //
+ // |= Stacks
+ // +--------------------------------------------------+---------------------------------------------------------------+
+ // | Known Good Stack | Guard Page | SMM Stack | Known Good Shadow Stack | Guard Page | SMM Shadow Stack |
+ // +--------------------------------------------------+---------------------------------------------------------------+
+ // | |PcdCpuSmmStackSize| |PcdCpuSmmShadowStackSize|
+ // |<---------------- mSmmStackSize ----------------->|<--------------------- mSmmShadowStackSize ------------------->|
+ // | |
+ // |<-------------------------------------------- Processor N ------------------------------------------------------->|
+ //
+ mSmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ mSmmShadowStackSize += EFI_PAGES_TO_SIZE (2);
+ }
+ }
+
+ Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (mSmmStackSize + mSmmShadowStackSize)));
+ ASSERT (Stacks != NULL);
+ mSmmStackArrayBase = (UINTN)Stacks;
+ mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (mSmmStackSize + mSmmShadowStackSize) - 1;
+
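+  //
+  // Layout sketch (illustrative, per the diagrams above): CPU N's stack block
+  // starts at mSmmStackArrayBase + N * (mSmmStackSize + mSmmShadowStackSize);
+  // when the stack guard is enabled, the first page of the block is the
+  // known-good stack and the second is the guard page that catches stack overflow.
+  //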
+ DEBUG ((DEBUG_INFO, "Stacks - 0x%x\n", Stacks));
+ DEBUG ((DEBUG_INFO, "mSmmStackSize - 0x%x\n", mSmmStackSize));
+ DEBUG ((DEBUG_INFO, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard)));
+ if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
+ DEBUG ((DEBUG_INFO, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize));
+ }
+
+ //
+ // Set SMI stack for SMM base relocation
+ //
+ PatchInstructionX86 (
+ gPatchSmmInitStack,
+ (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN)),
+ sizeof (UINTN)
+ );
+
+ //
+ // Initialize IDT
+ //
+ InitializeSmmIdt ();
+
+ //
+ // Relocate SMM Base addresses to the ones allocated from SMRAM
+ //
+ mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
+ ASSERT (mRebased != NULL);
+ SmmRelocateBases ();
+
+ //
+ // Call hook for BSP to perform extra actions in normal mode after all
+ // SMM base addresses have been relocated on all CPUs
+ //
+ SmmCpuFeaturesSmmRelocationComplete ();
+
+ DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));
+
+ //
+ // SMM Time initialization
+ //
+ InitializeSmmTimer ();
+
+ //
+ // Initialize MP globals
+ //
+ Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize, mSmmShadowStackSize);
+
+ if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
+ SetShadowStack (
+ Cr3,
+ (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + (mSmmStackSize + mSmmShadowStackSize) * Index,
+ mSmmShadowStackSize
+ );
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ SetNotPresentPage (
+ Cr3,
+ (EFI_PHYSICAL_ADDRESS)(UINTN)Stacks + mSmmStackSize + EFI_PAGES_TO_SIZE(1) + (mSmmStackSize + mSmmShadowStackSize) * Index,
+ EFI_PAGES_TO_SIZE(1)
+ );
+ }
+ }
+ }
+
+ //
+ // Fill in SMM Reserved Regions
+ //
+ gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
+ gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0;
+
+ //
+ // Install the SMM Configuration Protocol onto a new handle on the handle database.
+ // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer
+ // to an SMRAM address will be present in the handle database
+ //
+ Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces (
+ &gSmmCpuPrivate->SmmCpuHandle,
+ &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration,
+ NULL
+ );
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Install the SMM CPU Protocol into SMM protocol database
+ //
+ Status = gSmst->SmmInstallProtocolInterface (
+ &mSmmCpuHandle,
+ &gEfiSmmCpuProtocolGuid,
+ EFI_NATIVE_INTERFACE,
+ &mSmmCpu
+ );
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Install the SMM Memory Attribute Protocol into SMM protocol database
+ //
+ Status = gSmst->SmmInstallProtocolInterface (
+ &mSmmCpuHandle,
+ &gEdkiiSmmMemoryAttributeProtocolGuid,
+ EFI_NATIVE_INTERFACE,
+ &mSmmMemoryAttribute
+ );
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Initialize global buffer for MM MP.
+ //
+ InitializeDataForMmMp ();
+
+ //
+ // Install the SMM Mp Protocol into SMM protocol database
+ //
+ Status = gSmst->SmmInstallProtocolInterface (
+ &mSmmCpuHandle,
+ &gEfiMmMpProtocolGuid,
+ EFI_NATIVE_INTERFACE,
+ &mSmmMp
+ );
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported.
+ //
+ if (FeaturePcdGet (PcdCpuHotPlugSupport)) {
+ Status = PcdSet64S (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData);
+ ASSERT_EFI_ERROR (Status);
+ }
+
+ //
+ // Initialize SMM CPU Services Support
+ //
+ Status = InitializeSmmCpuServices (mSmmCpuHandle);
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // register SMM Ready To Lock Protocol notification
+ //
+ Status = gSmst->SmmRegisterProtocolNotify (
+ &gEfiSmmReadyToLockProtocolGuid,
+ SmmReadyToLockEventNotify,
+ &Registration
+ );
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Initialize SMM Profile feature
+ //
+ InitSmmProfile (Cr3);
+
+ GetAcpiS3EnableFlag ();
+ InitSmmS3ResumeState (Cr3);
+
+ DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n"));
+
+ return EFI_SUCCESS;
+}
+
+/**
+
+ Find out SMRAM information including SMRR base and SMRR size.
+
+ @param SmrrBase SMRR base
+ @param SmrrSize SMRR size
+
+**/
+VOID
+FindSmramInfo (
+ OUT UINT32 *SmrrBase,
+ OUT UINT32 *SmrrSize
+ )
+{
+ EFI_STATUS Status;
+ UINTN Size;
+ EFI_SMM_ACCESS2_PROTOCOL *SmmAccess;
+ EFI_SMRAM_DESCRIPTOR *CurrentSmramRange;
+ UINTN Index;
+ UINT64 MaxSize;
+ BOOLEAN Found;
+
+ //
+ // Get SMM Access Protocol
+ //
+ Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess);
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Get SMRAM information
+ //
+ Size = 0;
+ Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL);
+ ASSERT (Status == EFI_BUFFER_TOO_SMALL);
+
+ mSmmCpuSmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size);
+ ASSERT (mSmmCpuSmramRanges != NULL);
+
+ Status = SmmAccess->GetCapabilities (SmmAccess, &Size, mSmmCpuSmramRanges);
+ ASSERT_EFI_ERROR (Status);
+
+ mSmmCpuSmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR);
+
+ //
+  // Find the largest SMRAM range between 1MB and 4GB that is at least (256KB - 4KB) in size
+ //
+ CurrentSmramRange = NULL;
+ for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < mSmmCpuSmramRangeCount; Index++) {
+ //
+ // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization
+ //
+ if ((mSmmCpuSmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
+ continue;
+ }
+
+ if (mSmmCpuSmramRanges[Index].CpuStart >= BASE_1MB) {
+ if ((mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) <= SMRR_MAX_ADDRESS) {
+ if (mSmmCpuSmramRanges[Index].PhysicalSize >= MaxSize) {
+ MaxSize = mSmmCpuSmramRanges[Index].PhysicalSize;
+ CurrentSmramRange = &mSmmCpuSmramRanges[Index];
+ }
+ }
+ }
+ }
+
+ ASSERT (CurrentSmramRange != NULL);
+
+ *SmrrBase = (UINT32)CurrentSmramRange->CpuStart;
+ *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize;
+
+ do {
+ Found = FALSE;
+ for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
+ if (mSmmCpuSmramRanges[Index].CpuStart < *SmrrBase &&
+ *SmrrBase == (mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize)) {
+ *SmrrBase = (UINT32)mSmmCpuSmramRanges[Index].CpuStart;
+ *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
+ Found = TRUE;
+ } else if ((*SmrrBase + *SmrrSize) == mSmmCpuSmramRanges[Index].CpuStart && mSmmCpuSmramRanges[Index].PhysicalSize > 0) {
+ *SmrrSize = (UINT32)(*SmrrSize + mSmmCpuSmramRanges[Index].PhysicalSize);
+ Found = TRUE;
+ }
+ }
+ } while (Found);
+
+ DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize));
+}
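+
+//
+// Merge sketch (illustrative addresses): given two open SMRAM descriptors
+// [0x7F000000, +0x800000) and [0x7F800000, +0x800000), the loop above picks
+// one as the initial SMRR range and then absorbs the adjacent descriptor,
+// ending with *SmrrBase = 0x7F000000 and *SmrrSize = 0x1000000.
+//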
+
+/**
+  Configure SMM Code Access Check feature on an AP.
+  SMM Feature Control MSR will be locked after configuration.
+
+  @param[in,out] Buffer  Pointer to private data buffer.
+**/
+VOID
+EFIAPI
+ConfigSmmCodeAccessCheckOnCurrentProcessor (
+ IN OUT VOID *Buffer
+ )
+{
+ UINTN CpuIndex;
+ UINT64 SmmFeatureControlMsr;
+ UINT64 NewSmmFeatureControlMsr;
+
+ //
+ // Retrieve the CPU Index from the context passed in
+ //
+ CpuIndex = *(UINTN *)Buffer;
+
+ //
+ // Get the current SMM Feature Control MSR value
+ //
+ SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl);
+
+ //
+ // Compute the new SMM Feature Control MSR value
+ //
+ NewSmmFeatureControlMsr = SmmFeatureControlMsr;
+ if (mSmmCodeAccessCheckEnable) {
+ NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT;
+ if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) {
+ NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT;
+ }
+ }
+
+ //
+ // Only set the SMM Feature Control MSR value if the new value is different than the current value
+ //
+ if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) {
+ SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr);
+ }
+
+ //
+  // Release the spin lock used to serialize the updates to the SMM Feature Control MSR
+ //
+ ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
+}
+
+/**
+  Configure SMM Code Access Check feature for all processors.
+  SMM Feature Control MSR will be locked after configuration.
+**/
+VOID
+ConfigSmmCodeAccessCheck (
+ VOID
+ )
+{
+ UINTN Index;
+ EFI_STATUS Status;
+
+ //
+ // Check to see if the Feature Control MSR is supported on this CPU
+ //
+ Index = gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu;
+ if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) {
+ mSmmCodeAccessCheckEnable = FALSE;
+ return;
+ }
+
+ //
+ // Check to see if the CPU supports the SMM Code Access Check feature
+ // Do not access this MSR unless the CPU supports the SmmRegFeatureControl
+ //
+ if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) {
+ mSmmCodeAccessCheckEnable = FALSE;
+ return;
+ }
+
+ //
+ // Initialize the lock used to serialize the MSR programming in BSP and all APs
+ //
+ InitializeSpinLock (mConfigSmmCodeAccessCheckLock);
+
+ //
+ // Acquire Config SMM Code Access Check spin lock. The BSP will release the
+ // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
+ //
+ AcquireSpinLock (mConfigSmmCodeAccessCheckLock);
+
+ //
+ // Enable SMM Code Access Check feature on the BSP.
+ //
+ ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index);
+
+ //
+ // Enable SMM Code Access Check feature for the APs.
+ //
+ for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
+ if (Index != gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
+ if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) {
+ //
+ // If this processor does not exist
+ //
+ continue;
+ }
+ //
+ // Acquire Config SMM Code Access Check spin lock. The AP will release the
+ // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor().
+ //
+ AcquireSpinLock (mConfigSmmCodeAccessCheckLock);
+
+ //
+ // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP.
+ //
+ Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index);
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Wait for the AP to release the Config SMM Code Access Check spin lock.
+ //
+ while (!AcquireSpinLockOrFail (mConfigSmmCodeAccessCheckLock)) {
+ CpuPause ();
+ }
+
+ //
+ // Release the Config SMM Code Access Check spin lock.
+ //
+ ReleaseSpinLock (mConfigSmmCodeAccessCheckLock);
+ }
+ }
+}
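+
+//
+// Handshake sketch (illustrative): for every AP, the BSP holds
+// mConfigSmmCodeAccessCheckLock, starts the AP, and spins in
+// AcquireSpinLockOrFail() until the AP's ReleaseSpinLock() signals that the
+// SMM Feature Control MSR has been programmed; the APs are therefore
+// configured strictly one at a time.
+//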
+
+/**
+ This API provides a way to allocate memory for page table.
+
+  This API can be called more than once to allocate memory for page tables.
+
+ Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the
+ allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL
+ is returned. If there is not enough memory remaining to satisfy the request, then NULL is
+ returned.
+
+ @param Pages The number of 4 KB pages to allocate.
+
+ @return A pointer to the allocated buffer or NULL if allocation fails.
+
+**/
+VOID *
+AllocatePageTableMemory (
+ IN UINTN Pages
+ )
+{
+ VOID *Buffer;
+
+ Buffer = SmmCpuFeaturesAllocatePageTableMemory (Pages);
+ if (Buffer != NULL) {
+ return Buffer;
+ }
+ return AllocatePages (Pages);
+}
+
+/**
+ Allocate pages for code.
+
+ @param[in] Pages Number of pages to be allocated.
+
+ @return Allocated memory.
+**/
+VOID *
+AllocateCodePages (
+ IN UINTN Pages
+ )
+{
+ EFI_STATUS Status;
+ EFI_PHYSICAL_ADDRESS Memory;
+
+ if (Pages == 0) {
+ return NULL;
+ }
+
+ Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
+ if (EFI_ERROR (Status)) {
+ return NULL;
+ }
+ return (VOID *) (UINTN) Memory;
+}
+
+/**
+ Allocate aligned pages for code.
+
+ @param[in] Pages Number of pages to be allocated.
+ @param[in] Alignment The requested alignment of the allocation.
+ Must be a power of two.
+ If Alignment is zero, then byte alignment is used.
+
+ @return Allocated memory.
+**/
+VOID *
+AllocateAlignedCodePages (
+ IN UINTN Pages,
+ IN UINTN Alignment
+ )
+{
+ EFI_STATUS Status;
+ EFI_PHYSICAL_ADDRESS Memory;
+ UINTN AlignedMemory;
+ UINTN AlignmentMask;
+ UINTN UnalignedPages;
+ UINTN RealPages;
+
+ //
+ // Alignment must be a power of two or zero.
+ //
+ ASSERT ((Alignment & (Alignment - 1)) == 0);
+
+ if (Pages == 0) {
+ return NULL;
+ }
+ if (Alignment > EFI_PAGE_SIZE) {
+ //
+ // Calculate the total number of pages since alignment is larger than page size.
+ //
+ AlignmentMask = Alignment - 1;
+ RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);
+ //
+ // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
+ //
+ ASSERT (RealPages > Pages);
+
+ Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
+ if (EFI_ERROR (Status)) {
+ return NULL;
+ }
+ AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;
+ UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);
+ if (UnalignedPages > 0) {
+ //
+ // Free first unaligned page(s).
+ //
+ Status = gSmst->SmmFreePages (Memory, UnalignedPages);
+ ASSERT_EFI_ERROR (Status);
+ }
+ Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);
+ UnalignedPages = RealPages - Pages - UnalignedPages;
+ if (UnalignedPages > 0) {
+ //
+ // Free last unaligned page(s).
+ //
+ Status = gSmst->SmmFreePages (Memory, UnalignedPages);
+ ASSERT_EFI_ERROR (Status);
+ }
+ } else {
+ //
+ // Do not over-allocate pages in this case.
+ //
+ Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
+ if (EFI_ERROR (Status)) {
+ return NULL;
+ }
+ AlignedMemory = (UINTN) Memory;
+ }
+ return (VOID *) AlignedMemory;
+}
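+
+//
+// Trim sketch (illustrative addresses): requesting 8 pages aligned to
+// SIZE_32KB over-allocates RealPages = 8 + 8 = 16. If SmmAllocatePages()
+// returns 0x7F001000, AlignedMemory becomes 0x7F008000; the 7 leading pages
+// and the 1 trailing page are freed, leaving exactly 8 pages on a 32 KB
+// boundary.
+//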
+
+/**
+ Perform the remaining tasks.
+
+**/
+VOID
+PerformRemainingTasks (
+ VOID
+ )
+{
+ if (mSmmReadyToLock) {
+ //
+ // Start SMM Profile feature
+ //
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ SmmProfileStart ();
+ }
+ //
+    // Create a mix of 2MB and 4KB page tables. Mark some memory ranges as absent (not present) and execute-disable.
+ //
+ InitPaging ();
+
+ //
+ // Mark critical region to be read-only in page table
+ //
+ SetMemMapAttributes ();
+
+ if (IsRestrictedMemoryAccess ()) {
+ //
+      // Outside SMRAM, map only the SMM communication buffer and MMIO.
+ //
+ SetUefiMemMapAttributes ();
+
+ //
+ // Set page table itself to be read-only
+ //
+ SetPageTableAttributes ();
+ }
+
+ //
+ // Configure SMM Code Access Check feature if available.
+ //
+ ConfigSmmCodeAccessCheck ();
+
+ SmmCpuFeaturesCompleteSmmReadyToLock ();
+
+ //
+ // Clean SMM ready to lock flag
+ //
+ mSmmReadyToLock = FALSE;
+ }
+}
+
+/**
+ Perform the pre tasks.
+
+**/
+VOID
+PerformPreTasks (
+ VOID
+ )
+{
+ RestoreSmmConfigurationInS3 ();
+}
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
new file mode 100644
index 00000000..6f35d899
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
@@ -0,0 +1,1479 @@
+/** @file
+Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU.
+
+Copyright (c) 2009 - 2020, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
+
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef _CPU_PISMMCPUDXESMM_H_
+#define _CPU_PISMMCPUDXESMM_H_
+
+#include <PiSmm.h>
+
+#include <Protocol/MpService.h>
+#include <Protocol/SmmConfiguration.h>
+#include <Protocol/SmmCpu.h>
+#include <Protocol/SmmAccess2.h>
+#include <Protocol/SmmReadyToLock.h>
+#include <Protocol/SmmCpuService.h>
+#include <Protocol/SmmMemoryAttribute.h>
+#include <Protocol/MmMp.h>
+
+#include <Guid/AcpiS3Context.h>
+#include <Guid/MemoryAttributesTable.h>
+#include <Guid/PiSmmMemoryAttributesTable.h>
+
+#include <Library/BaseLib.h>
+#include <Library/IoLib.h>
+#include <Library/TimerLib.h>
+#include <Library/SynchronizationLib.h>
+#include <Library/DebugLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/PcdLib.h>
+#include <Library/MtrrLib.h>
+#include <Library/SmmCpuPlatformHookLib.h>
+#include <Library/SmmServicesTableLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/UefiBootServicesTableLib.h>
+#include <Library/UefiRuntimeServicesTableLib.h>
+#include <Library/DebugAgentLib.h>
+#include <Library/UefiLib.h>
+#include <Library/HobLib.h>
+#include <Library/LocalApicLib.h>
+#include <Library/UefiCpuLib.h>
+#include <Library/CpuExceptionHandlerLib.h>
+#include <Library/ReportStatusCodeLib.h>
+#include <Library/SmmCpuFeaturesLib.h>
+#include <Library/PeCoffGetEntryPointLib.h>
+#include <Library/RegisterCpuFeaturesLib.h>
+
+#include <AcpiCpuData.h>
+#include <CpuHotPlugData.h>
+
+#include <Register/Intel/Cpuid.h>
+#include <Register/Intel/Msr.h>
+
+#include "CpuService.h"
+#include "SmmProfile.h"
+
+//
+// CET definition
+//
+#define CPUID_CET_SS BIT7
+#define CPUID_CET_IBT BIT20
+
+#define CR4_CET_ENABLE BIT23
+
+#define MSR_IA32_S_CET 0x6A2
+#define MSR_IA32_PL0_SSP 0x6A4
+#define MSR_IA32_INTERRUPT_SSP_TABLE_ADDR 0x6A8
+
+typedef union {
+ struct {
+ // enable shadow stacks
+ UINT32 SH_STK_ENP:1;
+ // enable the WRSS{D,Q}W instructions.
+ UINT32 WR_SHSTK_EN:1;
+    // enable tracking that indirect call/jmp targets land on an ENDBRANCH instruction.
+ UINT32 ENDBR_EN:1;
+ // enable legacy compatibility treatment for indirect call/jmp tracking.
+ UINT32 LEG_IW_EN:1;
+ // enable use of no-track prefix on indirect call/jmp.
+ UINT32 NO_TRACK_EN:1;
+ // disable suppression of CET indirect branch tracking on legacy compatibility.
+ UINT32 SUPPRESS_DIS:1;
+ UINT32 RSVD:4;
+ // indirect branch tracking is suppressed.
+ // This bit can be written to 1 only if TRACKER is written as IDLE.
+ UINT32 SUPPRESS:1;
+ // Value of the endbranch state machine
+ // Values: IDLE (0), WAIT_FOR_ENDBRANCH(1).
+ UINT32 TRACKER:1;
+ // linear address of a bitmap in memory indicating valid
+ // pages as target of CALL/JMP_indirect that do not land on ENDBRANCH when CET is enabled
+ // and not suppressed. Valid when ENDBR_EN is 1. Must be machine canonical when written on
+ // parts that support 64 bit mode. On parts that do not support 64 bit mode, the bits 63:32 are
+ // reserved and must be 0. This value is extended by 12 bits at the low end to form the base address
+ // (this automatically aligns the address on a 4-Kbyte boundary).
+ UINT32 EB_LEG_BITMAP_BASE_low:12;
+ UINT32 EB_LEG_BITMAP_BASE_high:32;
+ } Bits;
+ UINT64 Uint64;
+} MSR_IA32_CET;
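+
+//
+// Usage sketch (illustrative, not used by this header): enabling shadow
+// stacks would set the SH_STK_ENP bit and write the union back, e.g.
+//
+//   MSR_IA32_CET  Cet;
+//   Cet.Uint64          = AsmReadMsr64 (MSR_IA32_S_CET);
+//   Cet.Bits.SH_STK_ENP = 1;
+//   AsmWriteMsr64 (MSR_IA32_S_CET, Cet.Uint64);
+//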
+
+//
+// MSRs required for configuration of SMM Code Access Check
+//
+#define EFI_MSR_SMM_MCA_CAP 0x17D
+#define SMM_CODE_ACCESS_CHK_BIT BIT58
+
+#define SMM_FEATURE_CONTROL_LOCK_BIT BIT0
+#define SMM_CODE_CHK_EN_BIT BIT2
+
+///
+/// Page Table Entry
+///
+#define IA32_PG_P BIT0
+#define IA32_PG_RW BIT1
+#define IA32_PG_U BIT2
+#define IA32_PG_WT BIT3
+#define IA32_PG_CD BIT4
+#define IA32_PG_A BIT5
+#define IA32_PG_D BIT6
+#define IA32_PG_PS BIT7
+#define IA32_PG_PAT_2M BIT12
+#define IA32_PG_PAT_4K IA32_PG_PS
+#define IA32_PG_PMNT BIT62
+#define IA32_PG_NX BIT63
+
+#define PAGE_ATTRIBUTE_BITS (IA32_PG_D | IA32_PG_A | IA32_PG_U | IA32_PG_RW | IA32_PG_P)
+//
+// Bits 1, 2, 5, 6 are reserved in the IA32 PAE PDPTE
+// X64 PAE PDPTE does not have such restriction
+//
+#define IA32_PAE_PDPTE_ATTRIBUTE_BITS (IA32_PG_P)
+
+#define PAGE_PROGATE_BITS (IA32_PG_NX | PAGE_ATTRIBUTE_BITS)
+
+#define PAGING_4K_MASK 0xFFF
+#define PAGING_2M_MASK 0x1FFFFF
+#define PAGING_1G_MASK 0x3FFFFFFF
+
+#define PAGING_PAE_INDEX_MASK 0x1FF
+
+#define PAGING_4K_ADDRESS_MASK_64 0x000FFFFFFFFFF000ull
+#define PAGING_2M_ADDRESS_MASK_64 0x000FFFFFFFE00000ull
+#define PAGING_1G_ADDRESS_MASK_64 0x000FFFFFC0000000ull
+
+#define SMRR_MAX_ADDRESS BASE_4GB
+
+typedef enum {
+ PageNone,
+ Page4K,
+ Page2M,
+ Page1G,
+} PAGE_ATTRIBUTE;
+
+typedef struct {
+ PAGE_ATTRIBUTE Attribute;
+ UINT64 Length;
+ UINT64 AddressMask;
+} PAGE_ATTRIBUTE_TABLE;
+
+//
+// Size of Task-State Segment defined in IA32 Manual
+//
+#define TSS_SIZE 104
+#define EXCEPTION_TSS_SIZE (TSS_SIZE + 4) // Add 4 bytes SSP
+#define TSS_X64_IST1_OFFSET 36
+#define TSS_IA32_CR3_OFFSET 28
+#define TSS_IA32_ESP_OFFSET 56
+#define TSS_IA32_SSP_OFFSET 104
+
+#define CR0_WP BIT16
+
+//
+// Code segment selector values
+//
+#define PROTECT_MODE_CODE_SEGMENT 0x08
+#define LONG_MODE_CODE_SEGMENT 0x38
+
+//
+// The size 0x20 must be larger than the size of the SmmInit template code.
+// Currently, SmmInit requires a buffer of at least 0x16 bytes.
+//
+#define BACK_BUF_SIZE 0x20
+
+#define EXCEPTION_VECTOR_NUMBER 0x20
+
+#define INVALID_APIC_ID 0xFFFFFFFFFFFFFFFFULL
+
+typedef UINT32 SMM_CPU_ARRIVAL_EXCEPTIONS;
+#define ARRIVAL_EXCEPTION_BLOCKED 0x1
+#define ARRIVAL_EXCEPTION_DELAYED 0x2
+#define ARRIVAL_EXCEPTION_SMI_DISABLED 0x4
+
+//
+// Wrapper used to convert EFI_AP_PROCEDURE2 and EFI_AP_PROCEDURE.
+//
+typedef struct {
+ EFI_AP_PROCEDURE Procedure;
+ VOID *ProcedureArgument;
+} PROCEDURE_WRAPPER;
+
+#define PROCEDURE_TOKEN_SIGNATURE SIGNATURE_32 ('P', 'R', 'T', 'S')
+
+typedef struct {
+ UINTN Signature;
+ LIST_ENTRY Link;
+
+ SPIN_LOCK *SpinLock;
+ volatile UINT32 RunningApCount;
+} PROCEDURE_TOKEN;
+
+#define PROCEDURE_TOKEN_FROM_LINK(a) CR (a, PROCEDURE_TOKEN, Link, PROCEDURE_TOKEN_SIGNATURE)
+
+#define TOKEN_BUFFER_SIGNATURE SIGNATURE_32 ('T', 'K', 'B', 'S')
+
+typedef struct {
+ UINTN Signature;
+ LIST_ENTRY Link;
+
+ UINT8 *Buffer;
+} TOKEN_BUFFER;
+
+#define TOKEN_BUFFER_FROM_LINK(a) CR (a, TOKEN_BUFFER, Link, TOKEN_BUFFER_SIGNATURE)
+
+//
+// Private structure for the SMM CPU module that is stored in DXE Runtime memory.
+// It contains the SMM Configuration Protocol that is produced, and holds a mix of
+// DXE and SMM contents, so all fields must be used with care.
+//
+#define SMM_CPU_PRIVATE_DATA_SIGNATURE SIGNATURE_32 ('s', 'c', 'p', 'u')
+
+typedef struct {
+ UINTN Signature;
+
+ EFI_HANDLE SmmCpuHandle;
+
+ EFI_PROCESSOR_INFORMATION *ProcessorInfo;
+ SMM_CPU_OPERATION *Operation;
+ UINTN *CpuSaveStateSize;
+ VOID **CpuSaveState;
+
+ EFI_SMM_RESERVED_SMRAM_REGION SmmReservedSmramRegion[1];
+ EFI_SMM_ENTRY_CONTEXT SmmCoreEntryContext;
+ EFI_SMM_ENTRY_POINT SmmCoreEntry;
+
+ EFI_SMM_CONFIGURATION_PROTOCOL SmmConfiguration;
+
+ PROCEDURE_WRAPPER *ApWrapperFunc;
+ LIST_ENTRY TokenList;
+ LIST_ENTRY *FirstFreeToken;
+} SMM_CPU_PRIVATE_DATA;
+
+extern SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate;
+extern CPU_HOT_PLUG_DATA mCpuHotPlugData;
+extern UINTN mMaxNumberOfCpus;
+extern UINTN mNumberOfCpus;
+extern EFI_SMM_CPU_PROTOCOL mSmmCpu;
+extern EFI_MM_MP_PROTOCOL mSmmMp;
+extern UINTN mInternalCr3;
+
+///
+/// The mode of the CPU at the time an SMI occurs
+///
+extern UINT8 mSmmSaveStateRegisterLma;
+
+//
+// SMM CPU Protocol function prototypes.
+//
+
+/**
+ Read information from the CPU save state.
+
+ @param This EFI_SMM_CPU_PROTOCOL instance
+ @param Width The number of bytes to read from the CPU save state.
+  @param Register Specifies the CPU register to read from the save state.
+ @param CpuIndex Specifies the zero-based index of the CPU save state
+ @param Buffer Upon return, this holds the CPU register value read from the save state.
+
+ @retval EFI_SUCCESS The register was read from Save State
+ @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
+ @retval EFI_INVALID_PARAMETER This or Buffer is NULL.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmReadSaveState (
+ IN CONST EFI_SMM_CPU_PROTOCOL *This,
+ IN UINTN Width,
+ IN EFI_SMM_SAVE_STATE_REGISTER Register,
+ IN UINTN CpuIndex,
+ OUT VOID *Buffer
+ );
+
+/**
+ Write data to the CPU save state.
+
+ @param This EFI_SMM_CPU_PROTOCOL instance
+  @param Width The number of bytes to write to the CPU save state.
+ @param Register Specifies the CPU register to write to the save state.
+ @param CpuIndex Specifies the zero-based index of the CPU save state
+ @param Buffer Upon entry, this holds the new CPU register value.
+
+  @retval EFI_SUCCESS The register was written to Save State
+  @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor
+  @retval EFI_INVALID_PARAMETER CpuIndex or Width is not correct
+
+**/
+EFI_STATUS
+EFIAPI
+SmmWriteSaveState (
+ IN CONST EFI_SMM_CPU_PROTOCOL *This,
+ IN UINTN Width,
+ IN EFI_SMM_SAVE_STATE_REGISTER Register,
+ IN UINTN CpuIndex,
+ IN CONST VOID *Buffer
+ );
+
+/**
+  Read a CPU Save State register on the target processor.
+
+  This function abstracts away whether the CPU Save State register is in the
+  IA32 CPU Save State Map or the X64 CPU Save State Map.
+
+  This function supports reading a CPU Save State register in the SMBase relocation handler.
+
+  @param[in]  CpuIndex Specifies the zero-based index of the CPU save state.
+  @param[in]  Register The EFI_SMM_SAVE_STATE_REGISTER to read.
+  @param[in]  Width    The number of bytes to read from the CPU save state.
+  @param[out] Buffer   Upon return, this holds the CPU register value read from the save state.
+
+  @retval EFI_SUCCESS           The register was read from Save State.
+  @retval EFI_NOT_FOUND         The register is not defined for the Save State of Processor.
+  @retval EFI_INVALID_PARAMETER Buffer is NULL, or Width does not meet requirement per Register type.
+
+**/
+EFI_STATUS
+EFIAPI
+ReadSaveStateRegister (
+ IN UINTN CpuIndex,
+ IN EFI_SMM_SAVE_STATE_REGISTER Register,
+ IN UINTN Width,
+ OUT VOID *Buffer
+ );
+
+/**
+  Write a value to a CPU Save State register on the target processor.
+
+  This function abstracts away whether the CPU Save State register is in the
+  IA32 CPU Save State Map or the X64 CPU Save State Map.
+
+  This function supports writing a CPU Save State register in the SMBase relocation handler.
+
+  @param[in] CpuIndex Specifies the zero-based index of the CPU save state.
+  @param[in] Register The EFI_SMM_SAVE_STATE_REGISTER to write.
+  @param[in] Width    The number of bytes to write to the CPU save state.
+  @param[in] Buffer   Upon entry, this holds the new CPU register value.
+
+  @retval EFI_SUCCESS           The register was written to Save State.
+  @retval EFI_NOT_FOUND         The register is not defined for the Save State of Processor.
+  @retval EFI_INVALID_PARAMETER CpuIndex or Width is not correct.
+
+**/
+EFI_STATUS
+EFIAPI
+WriteSaveStateRegister (
+ IN UINTN CpuIndex,
+ IN EFI_SMM_SAVE_STATE_REGISTER Register,
+ IN UINTN Width,
+ IN CONST VOID *Buffer
+ );
+
+extern CONST UINT8 gcSmmInitTemplate[];
+extern CONST UINT16 gcSmmInitSize;
+X86_ASSEMBLY_PATCH_LABEL gPatchSmmCr0;
+extern UINT32 mSmmCr0;
+X86_ASSEMBLY_PATCH_LABEL gPatchSmmCr3;
+extern UINT32 mSmmCr4;
+X86_ASSEMBLY_PATCH_LABEL gPatchSmmCr4;
+X86_ASSEMBLY_PATCH_LABEL gPatchSmmInitStack;
+X86_ASSEMBLY_PATCH_LABEL mPatchCetSupported;
+extern BOOLEAN mCetSupported;
+
+/**
+  Semaphore operation used while all processors relocate their SMBASE.
+**/
+VOID
+EFIAPI
+SmmRelocationSemaphoreComplete (
+ VOID
+ );
+
+///
+/// The type of SMM CPU Information
+///
+typedef struct {
+ SPIN_LOCK *Busy;
+ volatile EFI_AP_PROCEDURE2 Procedure;
+ volatile VOID *Parameter;
+ volatile UINT32 *Run;
+ volatile BOOLEAN *Present;
+ PROCEDURE_TOKEN *Token;
+ EFI_STATUS *Status;
+} SMM_CPU_DATA_BLOCK;
+
+typedef enum {
+ SmmCpuSyncModeTradition,
+ SmmCpuSyncModeRelaxedAp,
+ SmmCpuSyncModeMax
+} SMM_CPU_SYNC_MODE;
+
+typedef struct {
+ //
+ // Pointer to an array. The array should be located immediately after this structure
+ // so that UC cache-ability can be set together.
+ //
+ SMM_CPU_DATA_BLOCK *CpuData;
+ volatile UINT32 *Counter;
+ volatile UINT32 BspIndex;
+ volatile BOOLEAN *InsideSmm;
+ volatile BOOLEAN *AllCpusInSync;
+ volatile SMM_CPU_SYNC_MODE EffectiveSyncMode;
+ volatile BOOLEAN SwitchBsp;
+ volatile BOOLEAN *CandidateBsp;
+ EFI_AP_PROCEDURE StartupProcedure;
+ VOID *StartupProcArgs;
+} SMM_DISPATCHER_MP_SYNC_DATA;
+
+#define SMM_PSD_OFFSET 0xfb00
+
+///
+/// Pointers to all global semaphores
+///
+typedef struct {
+ volatile UINT32 *Counter;
+ volatile BOOLEAN *InsideSmm;
+ volatile BOOLEAN *AllCpusInSync;
+ SPIN_LOCK *PFLock;
+ SPIN_LOCK *CodeAccessCheckLock;
+} SMM_CPU_SEMAPHORE_GLOBAL;
+
+///
+/// All semaphores for each processor
+///
+typedef struct {
+ SPIN_LOCK *Busy;
+ volatile UINT32 *Run;
+ volatile BOOLEAN *Present;
+ SPIN_LOCK *Token;
+} SMM_CPU_SEMAPHORE_CPU;
+
+///
+/// All semaphores' information
+///
+typedef struct {
+ SMM_CPU_SEMAPHORE_GLOBAL SemaphoreGlobal;
+ SMM_CPU_SEMAPHORE_CPU SemaphoreCpu;
+} SMM_CPU_SEMAPHORES;
+
+extern IA32_DESCRIPTOR gcSmiGdtr;
+extern EFI_PHYSICAL_ADDRESS mGdtBuffer;
+extern UINTN mGdtBufferSize;
+extern IA32_DESCRIPTOR gcSmiIdtr;
+extern VOID *gcSmiIdtrPtr;
+extern UINT64 gPhyMask;
+extern SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData;
+extern UINTN mSmmStackArrayBase;
+extern UINTN mSmmStackArrayEnd;
+extern UINTN mSmmStackSize;
+extern EFI_SMM_CPU_SERVICE_PROTOCOL mSmmCpuService;
+extern IA32_DESCRIPTOR gcSmiInitGdtr;
+extern SMM_CPU_SEMAPHORES mSmmCpuSemaphores;
+extern UINTN mSemaphoreSize;
+extern SPIN_LOCK *mPFLock;
+extern SPIN_LOCK *mConfigSmmCodeAccessCheckLock;
+extern EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;
+extern UINTN mSmmCpuSmramRangeCount;
+extern UINT8 mPhysicalAddressBits;
+
+//
+// Copy of the PcdPteMemoryEncryptionAddressOrMask
+//
+extern UINT64 mAddressEncMask;
+
+/**
+ Create 4G PageTable in SMRAM.
+
+ @param[in] Is32BitPageTable Whether the page table is 32-bit PAE
+ @return PageTable Address
+
+**/
+UINT32
+Gen4GPageTable (
+ IN BOOLEAN Is32BitPageTable
+ );
+
+
+/**
+ Initialize global data for MP synchronization.
+
+ @param Stacks Base address of SMI stack buffer for all processors.
+ @param StackSize Stack size for each processor in SMM.
+ @param ShadowStackSize Shadow Stack size for each processor in SMM.
+
+**/
+UINT32
+InitializeMpServiceData (
+ IN VOID *Stacks,
+ IN UINTN StackSize,
+ IN UINTN ShadowStackSize
+ );
+
+/**
+ Initialize Timer for SMM AP Sync.
+
+**/
+VOID
+InitializeSmmTimer (
+ VOID
+ );
+
+/**
+ Start Timer for SMM AP Sync.
+
+**/
+UINT64
+EFIAPI
+StartSyncTimer (
+ VOID
+ );
+
+/**
+ Check if the SMM AP Sync timer is timeout.
+
+ @param Timer The start timer from the begin.
+
+**/
+BOOLEAN
+EFIAPI
+IsSyncTimerTimeout (
+ IN UINT64 Timer
+ );
+
+/**
+ Initialize IDT for SMM Stack Guard.
+
+**/
+VOID
+EFIAPI
+InitializeIDTSmmStackGuard (
+ VOID
+ );
+
+/**
+ Initialize Gdt for all processors.
+
+ @param[in] Cr3 CR3 value.
+ @param[out] GdtStepSize The step size for GDT table.
+
+ @return GdtBase for processor 0.
+ GdtBase for processor X is: GdtBase + (GdtStepSize * X)
+**/
+VOID *
+InitGdt (
+ IN UINTN Cr3,
+ OUT UINTN *GdtStepSize
+ );
+
+/**
+
+ Register the SMM Foundation entry point.
+
+ @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance
+ @param SmmEntryPoint SMM Foundation EntryPoint
+
+  @retval EFI_SUCCESS The SMM Foundation entry point was registered successfully
+
+**/
+EFI_STATUS
+EFIAPI
+RegisterSmmEntry (
+ IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This,
+ IN EFI_SMM_ENTRY_POINT SmmEntryPoint
+ );
+
+/**
+ Create PageTable for SMM use.
+
+ @return PageTable Address
+
+**/
+UINT32
+SmmInitPageTable (
+ VOID
+ );
+
+/**
+ Schedule a procedure to run on the specified CPU.
+
+ @param Procedure The address of the procedure to run
+ @param CpuIndex Target CPU number
+ @param ProcArguments The parameter to pass to the procedure
+
+  @retval EFI_INVALID_PARAMETER CpuIndex not valid
+  @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
+  @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
+  @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
+  @retval EFI_SUCCESS The procedure has been successfully scheduled
+
+**/
+EFI_STATUS
+EFIAPI
+SmmStartupThisAp (
+ IN EFI_AP_PROCEDURE Procedure,
+ IN UINTN CpuIndex,
+ IN OUT VOID *ProcArguments OPTIONAL
+ );
+
+/**
+ Schedule a procedure to run on the specified CPU in a blocking fashion.
+
+ @param Procedure The address of the procedure to run
+ @param CpuIndex Target CPU Index
+ @param ProcArguments The parameter to pass to the procedure
+
+  @retval EFI_INVALID_PARAMETER CpuIndex not valid
+  @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP
+  @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM
+  @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy
+ @retval EFI_SUCCESS The procedure has been successfully scheduled
+
+**/
+EFI_STATUS
+EFIAPI
+SmmBlockingStartupThisAp (
+ IN EFI_AP_PROCEDURE Procedure,
+ IN UINTN CpuIndex,
+ IN OUT VOID *ProcArguments OPTIONAL
+ );
+
+/**
+ This function sets the attributes for the memory region specified by BaseAddress and
+ Length from their current attributes to the attributes specified by Attributes.
+
+ @param[in] BaseAddress The physical address that is the start address of a memory region.
+ @param[in] Length The size in bytes of the memory region.
+ @param[in] Attributes The bit mask of attributes to set for the memory region.
+
+ @retval EFI_SUCCESS The attributes were set for the memory region.
+ @retval EFI_ACCESS_DENIED The attributes for the memory resource range specified by
+ BaseAddress and Length cannot be modified.
+ @retval EFI_INVALID_PARAMETER Length is zero.
+ Attributes specified an illegal combination of attributes that
+ cannot be set together.
+ @retval EFI_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
+ the memory resource range.
+  @retval EFI_UNSUPPORTED       The processor does not support one or more bytes of the memory
+                                resource range specified by BaseAddress and Length.
+                                The bit mask of attributes is not supported for the memory resource
+                                range specified by BaseAddress and Length.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmSetMemoryAttributes (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes
+ );
+
+/**
+ This function clears the attributes for the memory region specified by BaseAddress and
+ Length from their current attributes to the attributes specified by Attributes.
+
+ @param[in] BaseAddress The physical address that is the start address of a memory region.
+ @param[in] Length The size in bytes of the memory region.
+ @param[in] Attributes The bit mask of attributes to clear for the memory region.
+
+ @retval EFI_SUCCESS The attributes were cleared for the memory region.
+ @retval EFI_ACCESS_DENIED The attributes for the memory resource range specified by
+ BaseAddress and Length cannot be modified.
+ @retval EFI_INVALID_PARAMETER Length is zero.
+ Attributes specified an illegal combination of attributes that
+ cannot be set together.
+ @retval EFI_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
+ the memory resource range.
+  @retval EFI_UNSUPPORTED       The processor does not support one or more bytes of the memory
+                                resource range specified by BaseAddress and Length.
+                                The bit mask of attributes is not supported for the memory resource
+                                range specified by BaseAddress and Length.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmClearMemoryAttributes (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes
+ );
+
+/**
+ Initialize MP synchronization data.
+
+**/
+VOID
+EFIAPI
+InitializeMpSyncData (
+ VOID
+ );
+
+/**
+
+ Find out SMRAM information including SMRR base and SMRR size.
+
+ @param SmrrBase SMRR base
+ @param SmrrSize SMRR size
+
+**/
+VOID
+FindSmramInfo (
+ OUT UINT32 *SmrrBase,
+ OUT UINT32 *SmrrSize
+ );
+
+/**
+  Relocate the SMBASE for each processor.
+
+  Executed on the first boot and on every S3 resume.
+
+**/
+VOID
+EFIAPI
+SmmRelocateBases (
+ VOID
+ );
+
+/**
+ Page Fault handler for SMM use.
+
+  @param InterruptType Defines the type of interrupt or exception that
+                       occurred on the processor. This parameter is processor architecture specific.
+  @param SystemContext A pointer to the processor context when
+                       the interrupt occurred on the processor.
+**/
+VOID
+EFIAPI
+SmiPFHandler (
+ IN EFI_EXCEPTION_TYPE InterruptType,
+ IN EFI_SYSTEM_CONTEXT SystemContext
+ );
+
+/**
+ Perform the remaining tasks.
+
+**/
+VOID
+PerformRemainingTasks (
+ VOID
+ );
+
+/**
+ Perform the pre tasks.
+
+**/
+VOID
+PerformPreTasks (
+ VOID
+ );
+
+/**
+ Initialize MSR spin lock by MSR index.
+
+ @param MsrIndex MSR index value.
+
+**/
+VOID
+InitMsrSpinLockByIndex (
+ IN UINT32 MsrIndex
+ );
+
+/**
+  Hook the return address in the SMM Save State so that semaphore code is
+  executed immediately after the AP exits SMM, indicating to the BSP that
+  the AP has completed SMBASE relocation.
+
+ @param[in] CpuIndex The processor index.
+ @param[in] RebasedFlag A pointer to a flag that is set to TRUE
+ immediately after AP exits SMM.
+
+**/
+VOID
+SemaphoreHook (
+ IN UINTN CpuIndex,
+ IN volatile BOOLEAN *RebasedFlag
+ );
+
+/**
+  Configure SMM Code Access Check feature for all processors.
+  SMM Feature Control MSR will be locked after configuration.
+**/
+VOID
+ConfigSmmCodeAccessCheck (
+ VOID
+ );
+
+/**
+ Hook the code executed immediately after an RSM instruction on the currently
+ executing CPU. The mode of code executed immediately after RSM must be
+ detected, and the appropriate hook must be selected. Always clear the auto
+ HALT restart flag if it is set.
+
+ @param[in] CpuIndex The processor index for the currently
+ executing CPU.
+ @param[in] CpuState Pointer to SMRAM Save State Map for the
+ currently executing CPU.
+ @param[in] NewInstructionPointer32 Instruction pointer to use if resuming to
+ 32-bit mode from 64-bit SMM.
+ @param[in] NewInstructionPointer Instruction pointer to use if resuming to
+ same mode as SMM.
+
+ @retval The value of the original instruction pointer before it was hooked.
+
+**/
+UINT64
+EFIAPI
+HookReturnFromSmm (
+ IN UINTN CpuIndex,
+ SMRAM_SAVE_STATE_MAP *CpuState,
+ UINT64 NewInstructionPointer32,
+ UINT64 NewInstructionPointer
+ );
+
+/**
+ Get the size of the SMI Handler in bytes.
+
+ @retval The size, in bytes, of the SMI Handler.
+
+**/
+UINTN
+EFIAPI
+GetSmiHandlerSize (
+ VOID
+ );
+
+/**
+ Install the SMI handler for the CPU specified by CpuIndex. This function
+ is called by the CPU that was elected as monarch during System Management
+ Mode initialization.
+
+ @param[in] CpuIndex The index of the CPU to install the custom SMI handler.
+ The value must be between 0 and the NumberOfCpus field
+ in the System Management System Table (SMST).
+ @param[in] SmBase The SMBASE address for the CPU specified by CpuIndex.
+  @param[in] SmiStack The stack to use when an SMI is processed by the
+                      CPU specified by CpuIndex.
+  @param[in] StackSize The size, in bytes, of the stack used when an SMI is
+                       processed by the CPU specified by CpuIndex.
+ @param[in] GdtBase The base address of the GDT to use when an SMI is
+ processed by the CPU specified by CpuIndex.
+ @param[in] GdtSize The size, in bytes, of the GDT used when an SMI is
+ processed by the CPU specified by CpuIndex.
+ @param[in] IdtBase The base address of the IDT to use when an SMI is
+ processed by the CPU specified by CpuIndex.
+ @param[in] IdtSize The size, in bytes, of the IDT used when an SMI is
+ processed by the CPU specified by CpuIndex.
+ @param[in] Cr3 The base address of the page tables to use when an SMI
+ is processed by the CPU specified by CpuIndex.
+**/
+VOID
+EFIAPI
+InstallSmiHandler (
+ IN UINTN CpuIndex,
+ IN UINT32 SmBase,
+ IN VOID *SmiStack,
+ IN UINTN StackSize,
+ IN UINTN GdtBase,
+ IN UINTN GdtSize,
+ IN UINTN IdtBase,
+ IN UINTN IdtSize,
+ IN UINT32 Cr3
+ );
+
+/**
+ Search module name by input IP address and output it.
+
+ @param CallerIpAddress Caller instruction pointer.
+
+**/
+VOID
+DumpModuleInfoByIp (
+ IN UINTN CallerIpAddress
+ );
+
+/**
+ This function sets memory attribute according to MemoryAttributesTable.
+**/
+VOID
+SetMemMapAttributes (
+ VOID
+ );
+
+/**
+ This function sets UEFI memory attribute according to UEFI memory map.
+**/
+VOID
+SetUefiMemMapAttributes (
+ VOID
+ );
+
+/**
+ Return if the Address is forbidden as SMM communication buffer.
+
+ @param[in] Address the address to be checked
+
+ @return TRUE The address is forbidden as SMM communication buffer.
+ @return FALSE The address is allowed as SMM communication buffer.
+**/
+BOOLEAN
+IsSmmCommBufferForbiddenAddress (
+ IN UINT64 Address
+ );
+
+/**
+ This function caches the UEFI memory map information.
+**/
+VOID
+GetUefiMemoryMap (
+ VOID
+ );
+
+/**
+ This function sets memory attribute for page table.
+**/
+VOID
+SetPageTableAttributes (
+ VOID
+ );
+
+/**
+ Get page table base address and the depth of the page table.
+
+ @param[out] Base Page table base address.
+ @param[out] FiveLevels TRUE means 5 level paging. FALSE means 4 level paging.
+**/
+VOID
+GetPageTable (
+ OUT UINTN *Base,
+ OUT BOOLEAN *FiveLevels OPTIONAL
+ );
+
+/**
+ This function sets the attributes for the memory region specified by BaseAddress and
+ Length from their current attributes to the attributes specified by Attributes.
+
+ @param[in] BaseAddress The physical address that is the start address of a memory region.
+ @param[in] Length The size in bytes of the memory region.
+ @param[in] Attributes The bit mask of attributes to set for the memory region.
+ @param[out] IsSplitted TRUE means the page table was split. FALSE means the page table was not split.
+
+ @retval EFI_SUCCESS The attributes were set for the memory region.
+ @retval EFI_ACCESS_DENIED The attributes for the memory resource range specified by
+ BaseAddress and Length cannot be modified.
+ @retval EFI_INVALID_PARAMETER Length is zero.
+ Attributes specified an illegal combination of attributes that
+ cannot be set together.
+ @retval EFI_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
+ the memory resource range.
+ @retval EFI_UNSUPPORTED The processor does not support one or more bytes of the memory
+ resource range specified by BaseAddress and Length.
+ The bit mask of attributes is not supported for the memory resource
+ range specified by BaseAddress and Length.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmSetMemoryAttributesEx (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes,
+ OUT BOOLEAN *IsSplitted OPTIONAL
+ );
+
+/**
+ This function clears the attributes for the memory region specified by BaseAddress and
+ Length from their current attributes to the attributes specified by Attributes.
+
+ @param[in] BaseAddress The physical address that is the start address of a memory region.
+ @param[in] Length The size in bytes of the memory region.
+ @param[in] Attributes The bit mask of attributes to clear for the memory region.
+ @param[out] IsSplitted TRUE means the page table was split. FALSE means the page table was not split.
+
+ @retval EFI_SUCCESS The attributes were cleared for the memory region.
+ @retval EFI_ACCESS_DENIED The attributes for the memory resource range specified by
+ BaseAddress and Length cannot be modified.
+ @retval EFI_INVALID_PARAMETER Length is zero.
+ Attributes specified an illegal combination of attributes that
+ cannot be set together.
+ @retval EFI_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
+ the memory resource range.
+ @retval EFI_UNSUPPORTED The processor does not support one or more bytes of the memory
+ resource range specified by BaseAddress and Length.
+ The bit mask of attributes is not supported for the memory resource
+ range specified by BaseAddress and Length.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmClearMemoryAttributesEx (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes,
+ OUT BOOLEAN *IsSplitted OPTIONAL
+ );
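+
+//
+// Illustrative sketch only (Base and Size are assumed placeholders, both
+// 4 KB aligned): mark a region read-only and executable with the Ex
+// variants, using IsSplitted to learn whether a large page had to be split.
+//
+//   EFI_STATUS  Status;
+//   BOOLEAN     IsSplitted;
+//
+//   Status = SmmSetMemoryAttributesEx (Base, Size, EFI_MEMORY_RO, &IsSplitted);
+//   if (!EFI_ERROR (Status)) {
+//     Status = SmmClearMemoryAttributesEx (Base, Size, EFI_MEMORY_XP, &IsSplitted);
+//   }
+//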
+
+/**
+ This API provides a way to allocate memory for page table.
+
+ This API can be called more than once to allocate memory for page tables.
+
+ Allocates the number of 4KB pages of type EfiRuntimeServicesData and returns a pointer to the
+ allocated buffer. The buffer returned is aligned on a 4KB boundary. If Pages is 0, then NULL
+ is returned. If there is not enough memory remaining to satisfy the request, then NULL is
+ returned.
+
+ @param Pages The number of 4 KB pages to allocate.
+
+ @return A pointer to the allocated buffer or NULL if allocation fails.
+
+**/
+VOID *
+AllocatePageTableMemory (
+ IN UINTN Pages
+ );
+
+/**
+ Allocate pages for code.
+
+ @param[in] Pages Number of pages to be allocated.
+
+ @return Allocated memory.
+**/
+VOID *
+AllocateCodePages (
+ IN UINTN Pages
+ );
+
+/**
+ Allocate aligned pages for code.
+
+ @param[in] Pages Number of pages to be allocated.
+ @param[in] Alignment The requested alignment of the allocation.
+ Must be a power of two.
+ If Alignment is zero, then byte alignment is used.
+
+ @return Allocated memory.
+**/
+VOID *
+AllocateAlignedCodePages (
+ IN UINTN Pages,
+ IN UINTN Alignment
+ );
+
+//
+// S3 related global variable and function prototype.
+//
+
+extern BOOLEAN mSmmS3Flag;
+
+/**
+ Initialize SMM S3 resume state structure used during S3 Resume.
+
+ @param[in] Cr3 The base address of the page tables to use in SMM.
+
+**/
+VOID
+InitSmmS3ResumeState (
+ IN UINT32 Cr3
+ );
+
+/**
+ Get ACPI CPU data.
+
+**/
+VOID
+GetAcpiCpuData (
+ VOID
+ );
+
+/**
+ Restore SMM Configuration in S3 boot path.
+
+**/
+VOID
+RestoreSmmConfigurationInS3 (
+ VOID
+ );
+
+/**
+ Get ACPI S3 enable flag.
+
+**/
+VOID
+GetAcpiS3EnableFlag (
+ VOID
+ );
+
+/**
+ Transfer the AP to a safe hlt-loop after it finishes restoring CPU features on the S3 resume path.
+
+ @param[in] ApHltLoopCode The address of the safe hlt-loop function.
+ @param[in] TopOfStack A pointer to the new stack to use for the ApHltLoopCode.
+ @param[in] NumberToFinishAddress The address of the semaphore that counts finished APs.
+
+**/
+VOID
+TransferApToSafeState (
+ IN UINTN ApHltLoopCode,
+ IN UINTN TopOfStack,
+ IN UINTN NumberToFinishAddress
+ );
+
+/**
+ Set ShadowStack memory.
+
+ @param[in] Cr3 The page table base address.
+ @param[in] BaseAddress The physical address that is the start address of a memory region.
+ @param[in] Length The size in bytes of the memory region.
+
+ @retval EFI_SUCCESS The shadow stack memory is set.
+**/
+EFI_STATUS
+SetShadowStack (
+ IN UINTN Cr3,
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length
+ );
+
+/**
+ Set not present memory.
+
+ @param[in] Cr3 The page table base address.
+ @param[in] BaseAddress The physical address that is the start address of a memory region.
+ @param[in] Length The size in bytes of the memory region.
+
+ @retval EFI_SUCCESS The not present memory is set.
+**/
+EFI_STATUS
+SetNotPresentPage (
+ IN UINTN Cr3,
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length
+ );
+
+/**
+ Initialize the shadow stack related data structure.
+
+ @param CpuIndex The index of the CPU.
+ @param ShadowStack The bottom of the shadow stack for this CPU.
+**/
+VOID
+InitShadowStack (
+ IN UINTN CpuIndex,
+ IN VOID *ShadowStack
+ );
+
+/**
+ This function sets the given attributes of the memory region specified by
+ BaseAddress and Length.
+
+ @param This The EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL instance.
+ @param BaseAddress The physical address that is the start address of
+ a memory region.
+ @param Length The size in bytes of the memory region.
+ @param Attributes The bit mask of attributes to set for the memory
+ region.
+
+ @retval EFI_SUCCESS The attributes were set for the memory region.
+ @retval EFI_INVALID_PARAMETER Length is zero.
+ Attributes specified an illegal combination of
+ attributes that cannot be set together.
+ @retval EFI_UNSUPPORTED The processor does not support one or more
+ bytes of the memory resource range specified
+ by BaseAddress and Length.
+ The bit mask of attributes is not supported for
+ the memory resource range specified by
+ BaseAddress and Length.
+
+**/
+EFI_STATUS
+EFIAPI
+EdkiiSmmSetMemoryAttributes (
+ IN EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL *This,
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes
+ );
+
+/**
+ This function clears the given attributes of the memory region specified by
+ BaseAddress and Length.
+
+ @param This The EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL instance.
+ @param BaseAddress The physical address that is the start address of
+ a memory region.
+ @param Length The size in bytes of the memory region.
+ @param Attributes The bit mask of attributes to clear for the memory
+ region.
+
+ @retval EFI_SUCCESS The attributes were cleared for the memory region.
+ @retval EFI_INVALID_PARAMETER Length is zero.
+ Attributes specified an illegal combination of
+ attributes that cannot be cleared together.
+ @retval EFI_UNSUPPORTED The processor does not support one or more
+ bytes of the memory resource range specified
+ by BaseAddress and Length.
+ The bit mask of attributes is not supported for
+ the memory resource range specified by
+ BaseAddress and Length.
+
+**/
+EFI_STATUS
+EFIAPI
+EdkiiSmmClearMemoryAttributes (
+ IN EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL *This,
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes
+ );
+
+/**
+ This function retrieves the attributes of the memory region specified by
+ BaseAddress and Length. If different parts of the memory region have
+ different attributes, EFI_NO_MAPPING is returned.
+
+ @param This The EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL instance.
+ @param BaseAddress The physical address that is the start address of
+ a memory region.
+ @param Length The size in bytes of the memory region.
+ @param Attributes Pointer to attributes returned.
+
+ @retval EFI_SUCCESS The attributes were returned for the memory region.
+ @retval EFI_INVALID_PARAMETER Length is zero.
+ Attributes is NULL.
+ @retval EFI_NO_MAPPING Attributes are not consistent across the memory
+ region.
+ @retval EFI_UNSUPPORTED The processor does not support one or more
+ bytes of the memory resource range specified
+ by BaseAddress and Length.
+
+**/
+EFI_STATUS
+EFIAPI
+EdkiiSmmGetMemoryAttributes (
+ IN EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL *This,
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 *Attributes
+ );
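+
+//
+// Illustrative sketch only: another SMM driver querying attributes through
+// the protocol instance produced by this driver. The locate-by-GUID pattern
+// is standard SMM practice; Base and the local variable names are assumptions.
+//
+//   EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL  *MemoryAttribute;
+//   UINT64                               Attributes;
+//   EFI_STATUS                           Status;
+//
+//   Status = gSmst->SmmLocateProtocol (
+//                     &gEdkiiSmmMemoryAttributeProtocolGuid,
+//                     NULL,
+//                     (VOID **)&MemoryAttribute
+//                     );
+//   if (!EFI_ERROR (Status)) {
+//     Status = MemoryAttribute->GetMemoryAttributes (
+//                                 MemoryAttribute,
+//                                 Base,      // assumed 4 KB-aligned address
+//                                 SIZE_4KB,
+//                                 &Attributes
+//                                 );
+//   }
+//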
+
+/**
+ This function fixes up the addresses of the global variables and functions
+ referenced in the SmmInit assembly files to be absolute addresses.
+**/
+VOID
+EFIAPI
+PiSmmCpuSmmInitFixupAddress (
+ VOID
+ );
+
+/**
+ This function fixes up the addresses of the global variables and functions
+ referenced in the SmiEntry assembly files to be absolute addresses.
+**/
+VOID
+EFIAPI
+PiSmmCpuSmiEntryFixupAddress (
+ VOID
+ );
+
+/**
+ This function reads the CR2 register when on-demand paging is enabled
+ for 64-bit; it takes no action for 32-bit.
+
+ @param[out] Cr2 Pointer to the variable that receives the CR2 register value.
+**/
+VOID
+SaveCr2 (
+ OUT UINTN *Cr2
+ );
+
+/**
+ This function writes the CR2 register when on-demand paging is enabled
+ for 64-bit; it takes no action for 32-bit.
+
+ @param[in] Cr2 Value to write into the CR2 register.
+**/
+VOID
+RestoreCr2 (
+ IN UINTN Cr2
+ );
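+
+//
+// Illustrative sketch only: bracket work that may itself take a page fault,
+// so the faulting address recorded in CR2 for the SMM page-fault handler is
+// preserved across that work.
+//
+//   UINTN  Cr2;
+//
+//   SaveCr2 (&Cr2);
+//   //
+//   // ... work that may fault and overwrite CR2 ...
+//   //
+//   RestoreCr2 (Cr2);
+//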
+
+/**
+ Schedule a procedure to run on the specified CPU.
+
+ @param[in] Procedure The address of the procedure to run.
+ @param[in] CpuIndex Target CPU index.
+ @param[in,out] ProcArguments The parameter to pass to the procedure.
+ @param[in,out] Token This is an optional parameter that allows the caller to execute the
+ procedure in a blocking or non-blocking fashion. If it is NULL the
+ call is blocking, and the call will not return until the AP has
+ completed the procedure. If the token is not NULL, the call will
+ return immediately. The caller can check whether the procedure has
+ completed with CheckOnProcedure or WaitForProcedure.
+ @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for the APs to finish
+ execution of Procedure, either for blocking or non-blocking mode.
+ Zero means infinity. If the timeout expires before all APs return
+ from Procedure, then Procedure on the failed APs is terminated. If
+ the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
+ If the timeout expires in non-blocking mode, the timeout can be
+ determined through CheckOnProcedure or WaitForProcedure.
+ Note that timeout support is optional. Whether an implementation
+ supports this feature can be determined via the Attributes data
+ member.
+ @param[in,out] CpuStatus This optional pointer may be used to get the status code returned
+ by Procedure when it completes execution on the target AP, or with
+ EFI_TIMEOUT if the Procedure fails to complete within the optional
+ timeout. The implementation will update this variable with
+ EFI_NOT_READY prior to starting Procedure on the target AP.
+
+ @retval EFI_INVALID_PARAMETER CpuIndex is not valid.
+ @retval EFI_INVALID_PARAMETER CpuIndex specifies the BSP.
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex did not enter SMM.
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuIndex is busy.
+ @retval EFI_SUCCESS The procedure has been successfully scheduled.
+
+**/
+EFI_STATUS
+InternalSmmStartupThisAp (
+ IN EFI_AP_PROCEDURE2 Procedure,
+ IN UINTN CpuIndex,
+ IN OUT VOID *ProcArguments OPTIONAL,
+ IN OUT MM_COMPLETION *Token,
+ IN UINTN TimeoutInMicroseconds,
+ IN OUT EFI_STATUS *CpuStatus
+ );
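+
+//
+// Illustrative sketch only: a blocking dispatch of a hypothetical
+// EFI_AP_PROCEDURE2 named MyProcedure, with argument MyArg, to the AP with
+// index 1. Passing a NULL Token makes the call blocking, per the
+// description above; MyProcedure and MyArg are assumed names.
+//
+//   Status = InternalSmmStartupThisAp (MyProcedure, 1, &MyArg, NULL, 0, NULL);
+//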
+
+/**
+ Checks whether the input token is the currently used token.
+
+ @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
+ BroadcastProcedure.
+
+ @retval TRUE The input token is the currently used token.
+ @retval FALSE The input token is not the currently used token.
+**/
+BOOLEAN
+IsTokenInUse (
+ IN SPIN_LOCK *Token
+ );
+
+/**
+ Checks status of specified AP.
+
+ This function checks whether the specified AP has finished the task assigned
+ by StartupThisAP(), and whether the timeout has expired.
+
+ @param[in] Token This parameter describes the token that was passed into DispatchProcedure or
+ BroadcastProcedure.
+
+ @retval EFI_SUCCESS The specified AP has finished the task assigned by StartupThisAP().
+ @retval EFI_NOT_READY The specified AP has not finished the task and the timeout has not expired.
+**/
+EFI_STATUS
+IsApReady (
+ IN SPIN_LOCK *Token
+ );
+
+/**
+ Check whether the calling processor is a present AP.
+
+ @param CpuIndex The index of the AP that calls this function.
+
+ @retval TRUE It is a present AP.
+ @retval FALSE It is not an AP, or the AP is not present.
+
+**/
+BOOLEAN
+IsPresentAp (
+ IN UINTN CpuIndex
+ );
+
+/**
+ Worker function to execute a caller provided function on all enabled APs.
+
+ @param[in] Procedure A pointer to the function to be run on
+ enabled APs of the system.
+ @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for
+ APs to return from Procedure, either for
+ blocking or non-blocking mode.
+ @param[in,out] ProcedureArguments The parameter passed into Procedure for
+ all APs.
+ @param[in,out] Token This is an optional parameter that allows the caller to execute the
+ procedure in a blocking or non-blocking fashion. If it is NULL the
+ call is blocking, and the call will not return until the AP has
+ completed the procedure. If the token is not NULL, the call will
+ return immediately. The caller can check whether the procedure has
+ completed with CheckOnProcedure or WaitForProcedure.
+ @param[in,out] CPUStatus This optional pointer may be used to get the status code returned
+ by Procedure when it completes execution on the target AP, or with
+ EFI_TIMEOUT if the Procedure fails to complete within the optional
+ timeout. The implementation will update this variable with
+ EFI_NOT_READY prior to starting Procedure on the target AP.
+
+ @retval EFI_SUCCESS In blocking mode, all APs have finished before
+ the timeout expired.
+ @retval EFI_SUCCESS In non-blocking mode, function has been dispatched
+ to all enabled APs.
+ @retval others Failed to start up all APs.
+
+**/
+EFI_STATUS
+InternalSmmStartupAllAPs (
+ IN EFI_AP_PROCEDURE2 Procedure,
+ IN UINTN TimeoutInMicroseconds,
+ IN OUT VOID *ProcedureArguments OPTIONAL,
+ IN OUT MM_COMPLETION *Token,
+ IN OUT EFI_STATUS *CPUStatus
+ );
+
+/**
+
+ Register the SMM Foundation entry point.
+
+ @param[in] Procedure A pointer to the code stream to be run on the designated target AP
+ of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2
+ with the related definitions of
+ EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
+ The caller may pass a value of NULL to deregister any existing
+ startup procedure.
+ @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code that is
+ run by the AP. It is an optional common mailbox between APs and
+ the caller to share information.
+
+ @retval EFI_SUCCESS The Procedure has been set successfully.
+ @retval EFI_INVALID_PARAMETER The Procedure is NULL but ProcedureArguments is not NULL.
+
+**/
+EFI_STATUS
+RegisterStartupProcedure (
+ IN EFI_AP_PROCEDURE Procedure,
+ IN OUT VOID *ProcedureArguments OPTIONAL
+ );
+
+/**
+ Allocate buffers for the SpinLock and the wrapper function.
+
+**/
+VOID
+InitializeDataForMmMp (
+ VOID
+ );
+
+/**
+ Return whether access to non-SMRAM is restricted.
+
+ @retval TRUE Access to non-SMRAM is restricted.
+ @retval FALSE Access to non-SMRAM is not restricted.
+**/
+BOOLEAN
+IsRestrictedMemoryAccess (
+ VOID
+ );
+
+#endif
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
new file mode 100644
index 00000000..df745216
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
@@ -0,0 +1,153 @@
+## @file
+# CPU SMM driver.
+#
+# This SMM driver performs SMM initialization, deploys the SMM Entry Vector,
+# and provides CPU-specific services in SMM.
+#
+# Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
+# Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
+#
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+##
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = PiSmmCpuDxeSmm
+ MODULE_UNI_FILE = PiSmmCpuDxeSmm.uni
+ FILE_GUID = A3FF0EF5-0C28-42f5-B544-8C7DE1E80014
+ MODULE_TYPE = DXE_SMM_DRIVER
+ VERSION_STRING = 1.0
+ PI_SPECIFICATION_VERSION = 0x0001000A
+ ENTRY_POINT = PiCpuSmmEntry
+
+#
+# The following information is for reference only and not required by the build tools.
+#
+# VALID_ARCHITECTURES = IA32 X64
+#
+
+[Sources]
+ PiSmmCpuDxeSmm.c
+ PiSmmCpuDxeSmm.h
+ MpService.c
+ SyncTimer.c
+ CpuS3.c
+ CpuService.c
+ CpuService.h
+ SmmProfile.c
+ SmmProfile.h
+ SmmProfileInternal.h
+ SmramSaveState.c
+ SmmCpuMemoryManagement.c
+ SmmMp.h
+ SmmMp.c
+
+[Sources.Ia32]
+ Ia32/Semaphore.c
+ Ia32/PageTbl.c
+ Ia32/SmmFuncsArch.c
+ Ia32/SmmProfileArch.c
+ Ia32/SmmProfileArch.h
+ Ia32/SmmInit.nasm
+ Ia32/SmiEntry.nasm
+ Ia32/SmiException.nasm
+ Ia32/MpFuncs.nasm
+ Ia32/Cet.nasm
+
+[Sources.X64]
+ X64/Semaphore.c
+ X64/PageTbl.c
+ X64/SmmFuncsArch.c
+ X64/SmmProfileArch.c
+ X64/SmmProfileArch.h
+ X64/SmmInit.nasm
+ X64/SmiEntry.nasm
+ X64/SmiException.nasm
+ X64/MpFuncs.nasm
+ X64/Cet.nasm
+
+[Packages]
+ MdePkg/MdePkg.dec
+ MdeModulePkg/MdeModulePkg.dec
+ UefiCpuPkg/UefiCpuPkg.dec
+
+[LibraryClasses]
+ UefiDriverEntryPoint
+ UefiRuntimeServicesTableLib
+ PcdLib
+ DebugLib
+ BaseLib
+ SynchronizationLib
+ BaseMemoryLib
+ MtrrLib
+ IoLib
+ TimerLib
+ SmmServicesTableLib
+ MemoryAllocationLib
+ DebugAgentLib
+ HobLib
+ PciLib
+ LocalApicLib
+ UefiCpuLib
+ SmmCpuPlatformHookLib
+ CpuExceptionHandlerLib
+ UefiLib
+ DxeServicesTableLib
+ CpuLib
+ ReportStatusCodeLib
+ SmmCpuFeaturesLib
+ PeCoffGetEntryPointLib
+
+[Protocols]
+ gEfiSmmAccess2ProtocolGuid ## CONSUMES
+ gEfiMpServiceProtocolGuid ## CONSUMES
+ gEfiSmmConfigurationProtocolGuid ## PRODUCES
+ gEfiSmmCpuProtocolGuid ## PRODUCES
+ gEfiSmmReadyToLockProtocolGuid ## NOTIFY
+ gEfiSmmCpuServiceProtocolGuid ## PRODUCES
+ gEdkiiSmmMemoryAttributeProtocolGuid ## PRODUCES
+ gEfiMmMpProtocolGuid ## PRODUCES
+
+[Guids]
+ gEfiAcpiVariableGuid ## SOMETIMES_CONSUMES ## HOB # it is used for S3 boot.
+ gEdkiiPiSmmMemoryAttributesTableGuid ## CONSUMES ## SystemTable
+ gEfiMemoryAttributesTableGuid ## CONSUMES ## SystemTable
+
+[FeaturePcd]
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmDebug ## CONSUMES
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmBlockStartupThisAp ## CONSUMES
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmEnableBspElection ## CONSUMES
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuHotPlugSupport ## CONSUMES
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStackGuard ## CONSUMES
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmProfileEnable ## CONSUMES
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmProfileRingBuffer ## CONSUMES
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmFeatureControlMsrLock ## CONSUMES
+
+[Pcd]
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuMaxLogicalProcessorNumber ## SOMETIMES_CONSUMES
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmProfileSize ## SOMETIMES_CONSUMES
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStackSize ## CONSUMES
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmApSyncTimeout ## CONSUMES
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuS3DataAddress ## SOMETIMES_CONSUMES
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuHotPlugDataAddress ## SOMETIMES_PRODUCES
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmCodeAccessCheckEnable ## CONSUMES
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmSyncMode ## CONSUMES
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmShadowStackSize ## SOMETIMES_CONSUMES
+ gEfiMdeModulePkgTokenSpaceGuid.PcdAcpiS3Enable ## CONSUMES
+ gEfiMdeModulePkgTokenSpaceGuid.PcdPteMemoryEncryptionAddressOrMask ## CONSUMES
+ gEfiMdeModulePkgTokenSpaceGuid.PcdNullPointerDetectionPropertyMask ## CONSUMES
+ gEfiMdeModulePkgTokenSpaceGuid.PcdHeapGuardPropertyMask ## CONSUMES
+ gEfiMdePkgTokenSpaceGuid.PcdControlFlowEnforcementPropertyMask ## CONSUMES
+
+[FixedPcd]
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmMpTokenCountPerChunk ## CONSUMES
+
+[Pcd.X64]
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmRestrictedMemoryAccess ## CONSUMES
+
+[Depex]
+ gEfiMpServiceProtocolGuid
+
+[UserExtensions.TianoCore."ExtraFiles"]
+ PiSmmCpuDxeSmmExtra.uni
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.uni b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.uni
new file mode 100644
index 00000000..1ccae17b
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.uni
@@ -0,0 +1,15 @@
+// /** @file
+// CPU SMM driver.
+//
+// This SMM driver performs SMM initialization, deploys the SMM Entry Vector,
+// and provides CPU-specific services in SMM.
+//
+// Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+//
+// **/
+
+#string STR_MODULE_ABSTRACT #language en-US "CPU SMM driver"
+
+#string STR_MODULE_DESCRIPTION #language en-US "This SMM driver performs SMM initialization, deploys SMM Entry Vector, and provides CPU-specific services in SMM."
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmmExtra.uni b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmmExtra.uni
new file mode 100644
index 00000000..97e2815b
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmmExtra.uni
@@ -0,0 +1,12 @@
+// /** @file
+// PiSmmCpuDxeSmm Localized Strings and Content
+//
+// Copyright (c) 2013 - 2015, Intel Corporation. All rights reserved.<BR>
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+//
+// **/
+
+#string STR_PROPERTIES_MODULE_NAME
+#language en-US
+"Processor SMM Initialization DXE Driver"
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
new file mode 100644
index 00000000..4f01579b
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
@@ -0,0 +1,1571 @@
+/** @file
+
+Copyright (c) 2016 - 2019, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+//
+// attributes for reserved memory before it is promoted to system memory
+//
+#define EFI_MEMORY_PRESENT 0x0100000000000000ULL
+#define EFI_MEMORY_INITIALIZED 0x0200000000000000ULL
+#define EFI_MEMORY_TESTED 0x0400000000000000ULL
+
+#define PREVIOUS_MEMORY_DESCRIPTOR(MemoryDescriptor, Size) \
+ ((EFI_MEMORY_DESCRIPTOR *)((UINT8 *)(MemoryDescriptor) - (Size)))
+
+EFI_MEMORY_DESCRIPTOR *mUefiMemoryMap;
+UINTN mUefiMemoryMapSize;
+UINTN mUefiDescriptorSize;
+
+EFI_GCD_MEMORY_SPACE_DESCRIPTOR *mGcdMemSpace = NULL;
+UINTN mGcdMemNumberOfDesc = 0;
+
+EFI_MEMORY_ATTRIBUTES_TABLE *mUefiMemoryAttributesTable = NULL;
+
+PAGE_ATTRIBUTE_TABLE mPageAttributeTable[] = {
+ {Page4K, SIZE_4KB, PAGING_4K_ADDRESS_MASK_64},
+ {Page2M, SIZE_2MB, PAGING_2M_ADDRESS_MASK_64},
+ {Page1G, SIZE_1GB, PAGING_1G_ADDRESS_MASK_64},
+};
+
+UINTN mInternalCr3;
+
+/**
+ Set the internal page table base address.
+ If it is non-zero, subsequent memory attribute modifications are applied to this page table.
+ If it is zero, subsequent memory attribute modifications are applied to the active page table.
+
+ @param Cr3 The page table base address.
+**/
+VOID
+SetPageTableBase (
+ IN UINTN Cr3
+ )
+{
+ mInternalCr3 = Cr3;
+}
+
+/**
+ Return length according to page attributes.
+
+ @param[in] PageAttribute The page attribute of the page entry.
+
+ @return The length of the page entry.
+**/
+UINTN
+PageAttributeToLength (
+ IN PAGE_ATTRIBUTE PageAttribute
+ )
+{
+ UINTN Index;
+ for (Index = 0; Index < sizeof(mPageAttributeTable)/sizeof(mPageAttributeTable[0]); Index++) {
+ if (PageAttribute == mPageAttributeTable[Index].Attribute) {
+ return (UINTN)mPageAttributeTable[Index].Length;
+ }
+ }
+ return 0;
+}
+
+/**
+ Return address mask according to page attributes.
+
+ @param[in] PageAttribute The page attribute of the page entry.
+
+ @return The address mask of the page entry.
+**/
+UINTN
+PageAttributeToMask (
+ IN PAGE_ATTRIBUTE PageAttribute
+ )
+{
+ UINTN Index;
+ for (Index = 0; Index < sizeof(mPageAttributeTable)/sizeof(mPageAttributeTable[0]); Index++) {
+ if (PageAttribute == mPageAttributeTable[Index].Attribute) {
+ return (UINTN)mPageAttributeTable[Index].AddressMask;
+ }
+ }
+ return 0;
+}
+
+/**
+ Return page table entry to match the address.
+
+ @param[in] Address The address to be checked.
+ @param[out] PageAttribute The page attribute of the page entry.
+
+ @return The page entry.
+**/
+VOID *
+GetPageTableEntry (
+ IN PHYSICAL_ADDRESS Address,
+ OUT PAGE_ATTRIBUTE *PageAttribute
+ )
+{
+ UINTN Index1;
+ UINTN Index2;
+ UINTN Index3;
+ UINTN Index4;
+ UINTN Index5;
+ UINT64 *L1PageTable;
+ UINT64 *L2PageTable;
+ UINT64 *L3PageTable;
+ UINT64 *L4PageTable;
+ UINT64 *L5PageTable;
+ UINTN PageTableBase;
+ BOOLEAN Enable5LevelPaging;
+
+ GetPageTable (&PageTableBase, &Enable5LevelPaging);
+
+ Index5 = ((UINTN)RShiftU64 (Address, 48)) & PAGING_PAE_INDEX_MASK;
+ Index4 = ((UINTN)RShiftU64 (Address, 39)) & PAGING_PAE_INDEX_MASK;
+ Index3 = ((UINTN)Address >> 30) & PAGING_PAE_INDEX_MASK;
+ Index2 = ((UINTN)Address >> 21) & PAGING_PAE_INDEX_MASK;
+ Index1 = ((UINTN)Address >> 12) & PAGING_PAE_INDEX_MASK;
+
+ if (sizeof(UINTN) == sizeof(UINT64)) {
+ if (Enable5LevelPaging) {
+ L5PageTable = (UINT64 *)PageTableBase;
+ if (L5PageTable[Index5] == 0) {
+ *PageAttribute = PageNone;
+ return NULL;
+ }
+
+ L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
+ } else {
+ L4PageTable = (UINT64 *)PageTableBase;
+ }
+ if (L4PageTable[Index4] == 0) {
+ *PageAttribute = PageNone;
+ return NULL;
+ }
+
+ L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
+ } else {
+ L3PageTable = (UINT64 *)PageTableBase;
+ }
+ if (L3PageTable[Index3] == 0) {
+ *PageAttribute = PageNone;
+ return NULL;
+ }
+ if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
+ // 1G
+ *PageAttribute = Page1G;
+ return &L3PageTable[Index3];
+ }
+
+ L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
+ if (L2PageTable[Index2] == 0) {
+ *PageAttribute = PageNone;
+ return NULL;
+ }
+ if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
+ // 2M
+ *PageAttribute = Page2M;
+ return &L2PageTable[Index2];
+ }
+
+ // 4k
+ L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
+ if ((L1PageTable[Index1] == 0) && (Address != 0)) {
+ *PageAttribute = PageNone;
+ return NULL;
+ }
+ *PageAttribute = Page4K;
+ return &L1PageTable[Index1];
+}
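+
+//
+// Worked example of the index arithmetic above (illustrative only): for
+// Address 0x40201000 under 4-level paging,
+//   Index4 = (0x40201000 >> 39) & 0x1FF = 0
+//   Index3 = (0x40201000 >> 30) & 0x1FF = 1
+//   Index2 = (0x40201000 >> 21) & 0x1FF = 1
+//   Index1 = (0x40201000 >> 12) & 0x1FF = 1
+// so the walk visits L4[0] -> L3[1] -> L2[1] and, unless L2[1] is a 2M leaf,
+// ends at the L1[1] entry describing the 4K page at 0x40201000.
+//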
+
+/**
+ Return memory attributes of page entry.
+
+ @param[in] PageEntry The page entry.
+
+ @return Memory attributes of page entry.
+**/
+UINT64
+GetAttributesFromPageEntry (
+ IN UINT64 *PageEntry
+ )
+{
+ UINT64 Attributes;
+ Attributes = 0;
+ if ((*PageEntry & IA32_PG_P) == 0) {
+ Attributes |= EFI_MEMORY_RP;
+ }
+ if ((*PageEntry & IA32_PG_RW) == 0) {
+ Attributes |= EFI_MEMORY_RO;
+ }
+ if ((*PageEntry & IA32_PG_NX) != 0) {
+ Attributes |= EFI_MEMORY_XP;
+ }
+ return Attributes;
+}
+
+/**
+ Modify memory attributes of page entry.
+
+ @param[in] PageEntry The page entry.
+ @param[in] Attributes The bit mask of attributes to modify for the memory region.
+ @param[in] IsSet TRUE means to set attributes. FALSE means to clear attributes.
+ @param[out] IsModified TRUE means page table modified. FALSE means page table not modified.
+**/
+VOID
+ConvertPageEntryAttribute (
+ IN UINT64 *PageEntry,
+ IN UINT64 Attributes,
+ IN BOOLEAN IsSet,
+ OUT BOOLEAN *IsModified
+ )
+{
+ UINT64 CurrentPageEntry;
+ UINT64 NewPageEntry;
+
+ CurrentPageEntry = *PageEntry;
+ NewPageEntry = CurrentPageEntry;
+ if ((Attributes & EFI_MEMORY_RP) != 0) {
+ if (IsSet) {
+ NewPageEntry &= ~(UINT64)IA32_PG_P;
+ } else {
+ NewPageEntry |= IA32_PG_P;
+ }
+ }
+ if ((Attributes & EFI_MEMORY_RO) != 0) {
+ if (IsSet) {
+ NewPageEntry &= ~(UINT64)IA32_PG_RW;
+ if (mInternalCr3 != 0) {
+ // Environment setup
+ // A read-only page needs the Dirty bit set for the shadow stack
+ NewPageEntry |= IA32_PG_D;
+ // Clear user bit for supervisor shadow stack
+ NewPageEntry &= ~(UINT64)IA32_PG_U;
+ } else {
+ // Runtime update
+ // Clear the Dirty bit for non-shadow-stack pages, to protect RO pages.
+ NewPageEntry &= ~(UINT64)IA32_PG_D;
+ }
+ } else {
+ NewPageEntry |= IA32_PG_RW;
+ }
+ }
+ if ((Attributes & EFI_MEMORY_XP) != 0) {
+ if (mXdSupported) {
+ if (IsSet) {
+ NewPageEntry |= IA32_PG_NX;
+ } else {
+ NewPageEntry &= ~IA32_PG_NX;
+ }
+ }
+ }
+ *PageEntry = NewPageEntry;
+ if (CurrentPageEntry != NewPageEntry) {
+ *IsModified = TRUE;
+ DEBUG ((DEBUG_VERBOSE, "ConvertPageEntryAttribute 0x%lx", CurrentPageEntry));
+ DEBUG ((DEBUG_VERBOSE, "->0x%lx\n", NewPageEntry));
+ } else {
+ *IsModified = FALSE;
+ }
+}
+
+/**
+ This function returns whether the page entry needs to be split.
+
+ @param[in] BaseAddress The base address to be checked.
+ @param[in] Length The length to be checked.
+ @param[in] PageEntry The page entry to be checked.
+ @param[in] PageAttribute The page attribute of the page entry.
+
+ @retval The granularity to split to (Page4K or Page2M), or PageNone if no split is needed.
+**/
+PAGE_ATTRIBUTE
+NeedSplitPage (
+ IN PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 *PageEntry,
+ IN PAGE_ATTRIBUTE PageAttribute
+ )
+{
+ UINT64 PageEntryLength;
+
+ PageEntryLength = PageAttributeToLength (PageAttribute);
+
+ if (((BaseAddress & (PageEntryLength - 1)) == 0) && (Length >= PageEntryLength)) {
+ return PageNone;
+ }
+
+ if (((BaseAddress & PAGING_2M_MASK) != 0) || (Length < SIZE_2MB)) {
+ return Page4K;
+ }
+
+ return Page2M;
+}
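+
+//
+// Worked example (illustrative only): a request to convert 16 KB starting
+// at 0x40201000 that is currently mapped by a 2M entry. The base is not
+// 2M aligned and the length is below SIZE_2MB, so NeedSplitPage() returns
+// Page4K and the caller splits the 2M entry into 512 4K entries before
+// changing attributes. Had the request covered the whole aligned 2M range,
+// PageNone would have been returned and no split would occur.
+//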
+
+/**
+ This function splits one page entry to small page entries.
+
+ @param[in] PageEntry The page entry to be split.
+ @param[in] PageAttribute The page attribute of the page entry.
+ @param[in] SplitAttribute How to split the page entry.
+
+ @retval RETURN_SUCCESS The page entry was split.
+ @retval RETURN_UNSUPPORTED The page entry does not support being split.
+ @retval RETURN_OUT_OF_RESOURCES Not enough resources to split the page entry.
+**/
+RETURN_STATUS
+SplitPage (
+ IN UINT64 *PageEntry,
+ IN PAGE_ATTRIBUTE PageAttribute,
+ IN PAGE_ATTRIBUTE SplitAttribute
+ )
+{
+ UINT64 BaseAddress;
+ UINT64 *NewPageEntry;
+ UINTN Index;
+
+ ASSERT (PageAttribute == Page2M || PageAttribute == Page1G);
+
+ if (PageAttribute == Page2M) {
+ //
+ // Split 2M to 4K
+ //
+ ASSERT (SplitAttribute == Page4K);
+ if (SplitAttribute == Page4K) {
+ NewPageEntry = AllocatePageTableMemory (1);
+ DEBUG ((DEBUG_VERBOSE, "Split - 0x%x\n", NewPageEntry));
+ if (NewPageEntry == NULL) {
+ return RETURN_OUT_OF_RESOURCES;
+ }
+ BaseAddress = *PageEntry & PAGING_2M_ADDRESS_MASK_64;
+ for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
+ NewPageEntry[Index] = (BaseAddress + SIZE_4KB * Index) | mAddressEncMask | ((*PageEntry) & PAGE_PROGATE_BITS);
+ }
+ (*PageEntry) = (UINT64)(UINTN)NewPageEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ return RETURN_SUCCESS;
+ } else {
+ return RETURN_UNSUPPORTED;
+ }
+ } else if (PageAttribute == Page1G) {
+ //
+ // Split 1G to 2M
+ // There is no need to support 1G->4K directly; split 1G->2M first, then 2M->4K, to keep the page table compact.
+ //
+ ASSERT (SplitAttribute == Page2M || SplitAttribute == Page4K);
+ if ((SplitAttribute == Page2M || SplitAttribute == Page4K)) {
+ NewPageEntry = AllocatePageTableMemory (1);
+ DEBUG ((DEBUG_VERBOSE, "Split - 0x%x\n", NewPageEntry));
+ if (NewPageEntry == NULL) {
+ return RETURN_OUT_OF_RESOURCES;
+ }
+ BaseAddress = *PageEntry & PAGING_1G_ADDRESS_MASK_64;
+ for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
+ NewPageEntry[Index] = (BaseAddress + SIZE_2MB * Index) | mAddressEncMask | IA32_PG_PS | ((*PageEntry) & PAGE_PROGATE_BITS);
+ }
+ (*PageEntry) = (UINT64)(UINTN)NewPageEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ return RETURN_SUCCESS;
+ } else {
+ return RETURN_UNSUPPORTED;
+ }
+ } else {
+ return RETURN_UNSUPPORTED;
+ }
+}
+
+/**
+ This function modifies the page attributes for the memory region specified by BaseAddress and
+ Length from their current attributes to the attributes specified by Attributes.
+
+ The caller should make sure BaseAddress and Length are page aligned.
+
+ @param[in] BaseAddress The physical address that is the start address of a memory region.
+ @param[in] Length The size in bytes of the memory region.
+ @param[in] Attributes The bit mask of attributes to modify for the memory region.
+ @param[in] IsSet TRUE means to set attributes. FALSE means to clear attributes.
+ @param[out] IsSplitted TRUE means the page table was split. FALSE means the page table was not split.
+ @param[out] IsModified TRUE means the page table was modified. FALSE means the page table was not modified.
+
+ @retval RETURN_SUCCESS The attributes were modified for the memory region.
+ @retval RETURN_ACCESS_DENIED The attributes for the memory resource range specified by
+ BaseAddress and Length cannot be modified.
+ @retval RETURN_INVALID_PARAMETER Length is zero.
+ Attributes specified an illegal combination of attributes that
+ cannot be set together.
+ @retval RETURN_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
+ the memory resource range.
+ @retval RETURN_UNSUPPORTED The processor does not support one or more bytes of the memory
+ resource range specified by BaseAddress and Length.
+ The bit mask of attributes is not supported for the memory resource
+ range specified by BaseAddress and Length.
+**/
+RETURN_STATUS
+EFIAPI
+ConvertMemoryPageAttributes (
+ IN PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes,
+ IN BOOLEAN IsSet,
+ OUT BOOLEAN *IsSplitted, OPTIONAL
+ OUT BOOLEAN *IsModified OPTIONAL
+ )
+{
+ UINT64 *PageEntry;
+ PAGE_ATTRIBUTE PageAttribute;
+ UINTN PageEntryLength;
+ PAGE_ATTRIBUTE SplitAttribute;
+ RETURN_STATUS Status;
+ BOOLEAN IsEntryModified;
+ EFI_PHYSICAL_ADDRESS MaximumSupportMemAddress;
+
+ ASSERT (Attributes != 0);
+ ASSERT ((Attributes & ~EFI_MEMORY_ATTRIBUTE_MASK) == 0);
+
+ ASSERT ((BaseAddress & (SIZE_4KB - 1)) == 0);
+ ASSERT ((Length & (SIZE_4KB - 1)) == 0);
+
+ if (Length == 0) {
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ MaximumSupportMemAddress = (EFI_PHYSICAL_ADDRESS)(UINTN)(LShiftU64 (1, mPhysicalAddressBits) - 1);
+ if (BaseAddress > MaximumSupportMemAddress) {
+ return RETURN_UNSUPPORTED;
+ }
+ if (Length > MaximumSupportMemAddress) {
+ return RETURN_UNSUPPORTED;
+ }
+ if ((Length != 0) && (BaseAddress > MaximumSupportMemAddress - (Length - 1))) {
+ return RETURN_UNSUPPORTED;
+ }
+
+// DEBUG ((DEBUG_ERROR, "ConvertMemoryPageAttributes(%x) - %016lx, %016lx, %02lx\n", IsSet, BaseAddress, Length, Attributes));
+
+ if (IsSplitted != NULL) {
+ *IsSplitted = FALSE;
+ }
+ if (IsModified != NULL) {
+ *IsModified = FALSE;
+ }
+
+ //
+ // The logic below checks for 2M/4K pages to make sure we do not waste memory.
+ //
+ while (Length != 0) {
+ PageEntry = GetPageTableEntry (BaseAddress, &PageAttribute);
+ if (PageEntry == NULL) {
+ return RETURN_UNSUPPORTED;
+ }
+ PageEntryLength = PageAttributeToLength (PageAttribute);
+ SplitAttribute = NeedSplitPage (BaseAddress, Length, PageEntry, PageAttribute);
+ if (SplitAttribute == PageNone) {
+ ConvertPageEntryAttribute (PageEntry, Attributes, IsSet, &IsEntryModified);
+ if (IsEntryModified) {
+ if (IsModified != NULL) {
+ *IsModified = TRUE;
+ }
+ }
+ //
+ // Convert success, move to next
+ //
+ BaseAddress += PageEntryLength;
+ Length -= PageEntryLength;
+ } else {
+ Status = SplitPage (PageEntry, PageAttribute, SplitAttribute);
+ if (RETURN_ERROR (Status)) {
+ return RETURN_UNSUPPORTED;
+ }
+ if (IsSplitted != NULL) {
+ *IsSplitted = TRUE;
+ }
+ if (IsModified != NULL) {
+ *IsModified = TRUE;
+ }
+ //
+ // Just split the current page;
+ // the conversion succeeds in the next round.
+ //
+ }
+ }
+
+ return RETURN_SUCCESS;
+}
+
+/**
+ FlushTlb on current processor.
+
+ @param[in,out] Buffer Pointer to private data buffer.
+**/
+VOID
+EFIAPI
+FlushTlbOnCurrentProcessor (
+ IN OUT VOID *Buffer
+ )
+{
+ CpuFlushTlb ();
+}
+
+/**
+ FlushTlb for all processors.
+**/
+VOID
+FlushTlbForAll (
+ VOID
+ )
+{
+ UINTN Index;
+
+ FlushTlbOnCurrentProcessor (NULL);
+
+ for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
+ if (Index != gSmst->CurrentlyExecutingCpu) {
+ // Force the AP to start up in blocking mode.
+ SmmBlockingStartupThisAp (FlushTlbOnCurrentProcessor, Index, NULL);
+ // Do not check the return status, because the AP might not be present in some corner cases.
+ }
+ }
+}
+
+/**
+ This function sets the attributes for the memory region specified by BaseAddress and
+ Length from their current attributes to the attributes specified by Attributes.
+
+ @param[in] BaseAddress The physical address that is the start address of a memory region.
+ @param[in] Length The size in bytes of the memory region.
+ @param[in] Attributes The bit mask of attributes to set for the memory region.
+ @param[out] IsSplitted TRUE means the page table was split. FALSE means the page table was not split.
+
+ @retval EFI_SUCCESS The attributes were set for the memory region.
+ @retval EFI_ACCESS_DENIED The attributes for the memory resource range specified by
+ BaseAddress and Length cannot be modified.
+ @retval EFI_INVALID_PARAMETER Length is zero.
+ Attributes specified an illegal combination of attributes that
+ cannot be set together.
+ @retval EFI_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
+ the memory resource range.
+ @retval EFI_UNSUPPORTED The processor does not support one or more bytes of the memory
+ resource range specified by BaseAddress and Length.
+ The bit mask of attributes is not supported for the memory resource
+ range specified by BaseAddress and Length.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmSetMemoryAttributesEx (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes,
+ OUT BOOLEAN *IsSplitted OPTIONAL
+ )
+{
+ EFI_STATUS Status;
+ BOOLEAN IsModified;
+
+ Status = ConvertMemoryPageAttributes (BaseAddress, Length, Attributes, TRUE, IsSplitted, &IsModified);
+ if (!EFI_ERROR(Status)) {
+ if (IsModified) {
+ //
+ // Flush TLB as last step
+ //
+ FlushTlbForAll();
+ }
+ }
+
+ return Status;
+}
+
+/**
+ This function clears the attributes for the memory region specified by BaseAddress and
+ Length from their current attributes to the attributes specified by Attributes.
+
+ @param[in] BaseAddress The physical address that is the start address of a memory region.
+ @param[in] Length The size in bytes of the memory region.
+ @param[in] Attributes The bit mask of attributes to clear for the memory region.
+ @param[out] IsSplitted TRUE means the page table was split. FALSE means the page table was not split.
+
+ @retval EFI_SUCCESS The attributes were cleared for the memory region.
+ @retval EFI_ACCESS_DENIED The attributes for the memory resource range specified by
+ BaseAddress and Length cannot be modified.
+ @retval EFI_INVALID_PARAMETER Length is zero.
+ Attributes specified an illegal combination of attributes that
+ cannot be cleared together.
+ @retval EFI_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
+ the memory resource range.
+ @retval EFI_UNSUPPORTED The processor does not support one or more bytes of the memory
+ resource range specified by BaseAddress and Length.
+ The bit mask of attributes is not supported for the memory resource
+ range specified by BaseAddress and Length.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmClearMemoryAttributesEx (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes,
+ OUT BOOLEAN *IsSplitted OPTIONAL
+ )
+{
+ EFI_STATUS Status;
+ BOOLEAN IsModified;
+
+ Status = ConvertMemoryPageAttributes (BaseAddress, Length, Attributes, FALSE, IsSplitted, &IsModified);
+ if (!EFI_ERROR(Status)) {
+ if (IsModified) {
+ //
+ // Flush TLB as last step
+ //
+ FlushTlbForAll();
+ }
+ }
+
+ return Status;
+}
+
+/**
+ This function sets the attributes for the memory region specified by BaseAddress and
+ Length from their current attributes to the attributes specified by Attributes.
+
+ @param[in] BaseAddress The physical address that is the start address of a memory region.
+ @param[in] Length The size in bytes of the memory region.
+ @param[in] Attributes The bit mask of attributes to set for the memory region.
+
+ @retval EFI_SUCCESS The attributes were set for the memory region.
+ @retval EFI_ACCESS_DENIED The attributes for the memory resource range specified by
+ BaseAddress and Length cannot be modified.
+ @retval EFI_INVALID_PARAMETER Length is zero.
+ Attributes specified an illegal combination of attributes that
+ cannot be set together.
+ @retval EFI_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
+ the memory resource range.
+ @retval EFI_UNSUPPORTED The processor does not support one or more bytes of the memory
+ resource range specified by BaseAddress and Length.
+ The bit mask of attributes is not supported for the memory resource
+ range specified by BaseAddress and Length.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmSetMemoryAttributes (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes
+ )
+{
+ return SmmSetMemoryAttributesEx (BaseAddress, Length, Attributes, NULL);
+}
+
+/**
+ This function clears the attributes for the memory region specified by BaseAddress and
+ Length from their current attributes to the attributes specified by Attributes.
+
+ @param[in] BaseAddress The physical address that is the start address of a memory region.
+ @param[in] Length The size in bytes of the memory region.
+ @param[in] Attributes The bit mask of attributes to clear for the memory region.
+
+ @retval EFI_SUCCESS The attributes were cleared for the memory region.
+ @retval EFI_ACCESS_DENIED The attributes for the memory resource range specified by
+ BaseAddress and Length cannot be modified.
+ @retval EFI_INVALID_PARAMETER Length is zero.
+ Attributes specified an illegal combination of attributes that
+ cannot be cleared together.
+ @retval EFI_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
+ the memory resource range.
+ @retval EFI_UNSUPPORTED The processor does not support one or more bytes of the memory
+ resource range specified by BaseAddress and Length.
+ The bit mask of attributes is not supported for the memory resource
+ range specified by BaseAddress and Length.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmClearMemoryAttributes (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes
+ )
+{
+ return SmmClearMemoryAttributesEx (BaseAddress, Length, Attributes, NULL);
+}
+
+/**
+ Set ShadowStack memory.
+
+ @param[in] Cr3 The page table base address.
+ @param[in] BaseAddress The physical address that is the start address of a memory region.
+ @param[in] Length The size in bytes of the memory region.
+
+ @retval EFI_SUCCESS The shadow stack memory is set.
+**/
+EFI_STATUS
+SetShadowStack (
+ IN UINTN Cr3,
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length
+ )
+{
+ EFI_STATUS Status;
+
+ SetPageTableBase (Cr3);
+
+ Status = SmmSetMemoryAttributes (BaseAddress, Length, EFI_MEMORY_RO);
+
+ SetPageTableBase (0);
+
+ return Status;
+}
+
+/**
+ Set not present memory.
+
+ @param[in] Cr3 The page table base address.
+ @param[in] BaseAddress The physical address that is the start address of a memory region.
+ @param[in] Length The size in bytes of the memory region.
+
+ @retval EFI_SUCCESS The not present memory is set.
+**/
+EFI_STATUS
+SetNotPresentPage (
+ IN UINTN Cr3,
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length
+ )
+{
+ EFI_STATUS Status;
+
+ SetPageTableBase (Cr3);
+
+ Status = SmmSetMemoryAttributes (BaseAddress, Length, EFI_MEMORY_RP);
+
+ SetPageTableBase (0);
+
+ return Status;
+}
+
+/**
+ Retrieves a pointer to the system configuration table from the SMM System Table
+ based on a specified GUID.
+
+ @param[in] TableGuid The pointer to table's GUID type.
+ @param[out] Table The pointer to the table associated with TableGuid in the SMM System Table.
+
+ @retval EFI_SUCCESS A configuration table matching TableGuid was found.
+ @retval EFI_NOT_FOUND A configuration table matching TableGuid could not be found.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmGetSystemConfigurationTable (
+ IN EFI_GUID *TableGuid,
+ OUT VOID **Table
+ )
+{
+ UINTN Index;
+
+ ASSERT (TableGuid != NULL);
+ ASSERT (Table != NULL);
+
+ *Table = NULL;
+ for (Index = 0; Index < gSmst->NumberOfTableEntries; Index++) {
+ if (CompareGuid (TableGuid, &(gSmst->SmmConfigurationTable[Index].VendorGuid))) {
+ *Table = gSmst->SmmConfigurationTable[Index].VendorTable;
+ return EFI_SUCCESS;
+ }
+ }
+
+ return EFI_NOT_FOUND;
+}
+
+/**
+ This function sets SMM save state buffer to be RW and XP.
+**/
+VOID
+PatchSmmSaveStateMap (
+ VOID
+ )
+{
+ UINTN Index;
+ UINTN TileCodeSize;
+ UINTN TileDataSize;
+ UINTN TileSize;
+
+ TileCodeSize = GetSmiHandlerSize ();
+ TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);
+ TileDataSize = (SMRAM_SAVE_STATE_MAP_OFFSET - SMM_PSD_OFFSET) + sizeof (SMRAM_SAVE_STATE_MAP);
+ TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);
+ TileSize = TileDataSize + TileCodeSize - 1;
+ TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
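+
+ //
+ // Worked example of the rounding above (illustrative only): with
+ // TileCodeSize = 0x1000 and TileDataSize = 0x1000, TileSize is first
+ // 0x1FFF, and 2 * GetPowerOfTwo32 (0x1FFF) = 2 * 0x1000 = 0x2000, the
+ // smallest power of two that covers both the code and the data tile.
+ //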
+
+ DEBUG ((DEBUG_INFO, "PatchSmmSaveStateMap:\n"));
+ for (Index = 0; Index < mMaxNumberOfCpus - 1; Index++) {
+ //
+ // Code
+ //
+ SmmSetMemoryAttributes (
+ mCpuHotPlugData.SmBase[Index] + SMM_HANDLER_OFFSET,
+ TileCodeSize,
+ EFI_MEMORY_RO
+ );
+ SmmClearMemoryAttributes (
+ mCpuHotPlugData.SmBase[Index] + SMM_HANDLER_OFFSET,
+ TileCodeSize,
+ EFI_MEMORY_XP
+ );
+
+ //
+ // Data
+ //
+ SmmClearMemoryAttributes (
+ mCpuHotPlugData.SmBase[Index] + SMM_HANDLER_OFFSET + TileCodeSize,
+ TileSize - TileCodeSize,
+ EFI_MEMORY_RO
+ );
+ SmmSetMemoryAttributes (
+ mCpuHotPlugData.SmBase[Index] + SMM_HANDLER_OFFSET + TileCodeSize,
+ TileSize - TileCodeSize,
+ EFI_MEMORY_XP
+ );
+ }
+
+ //
+ // Code
+ //
+ SmmSetMemoryAttributes (
+ mCpuHotPlugData.SmBase[mMaxNumberOfCpus - 1] + SMM_HANDLER_OFFSET,
+ TileCodeSize,
+ EFI_MEMORY_RO
+ );
+ SmmClearMemoryAttributes (
+ mCpuHotPlugData.SmBase[mMaxNumberOfCpus - 1] + SMM_HANDLER_OFFSET,
+ TileCodeSize,
+ EFI_MEMORY_XP
+ );
+
+ //
+ // Data
+ //
+ SmmClearMemoryAttributes (
+ mCpuHotPlugData.SmBase[mMaxNumberOfCpus - 1] + SMM_HANDLER_OFFSET + TileCodeSize,
+ SIZE_32KB - TileCodeSize,
+ EFI_MEMORY_RO
+ );
+ SmmSetMemoryAttributes (
+ mCpuHotPlugData.SmBase[mMaxNumberOfCpus - 1] + SMM_HANDLER_OFFSET + TileCodeSize,
+ SIZE_32KB - TileCodeSize,
+ EFI_MEMORY_XP
+ );
+}
+
+/**
+ This function sets GDT/IDT buffer to be RO and XP.
+**/
+VOID
+PatchGdtIdtMap (
+ VOID
+ )
+{
+ EFI_PHYSICAL_ADDRESS BaseAddress;
+ UINTN Size;
+
+ //
+ // GDT
+ //
+ DEBUG ((DEBUG_INFO, "PatchGdtIdtMap - GDT:\n"));
+
+ BaseAddress = mGdtBuffer;
+ Size = ALIGN_VALUE(mGdtBufferSize, SIZE_4KB);
+ //
+ // The range should have been set to RO
+ // if it is allocated with EfiRuntimeServicesCode.
+ //
+ SmmSetMemoryAttributes (
+ BaseAddress,
+ Size,
+ EFI_MEMORY_XP
+ );
+
+ //
+ // IDT
+ //
+ DEBUG ((DEBUG_INFO, "PatchGdtIdtMap - IDT:\n"));
+
+ BaseAddress = gcSmiIdtr.Base;
+ Size = ALIGN_VALUE(gcSmiIdtr.Limit + 1, SIZE_4KB);
+ //
+ // The range should have been set to RO
+ // if it is allocated with EfiRuntimeServicesCode.
+ //
+ SmmSetMemoryAttributes (
+ BaseAddress,
+ Size,
+ EFI_MEMORY_XP
+ );
+}
+
+/**
+ This function sets memory attribute according to MemoryAttributesTable.
+**/
+VOID
+SetMemMapAttributes (
+ VOID
+ )
+{
+ EFI_MEMORY_DESCRIPTOR *MemoryMap;
+ EFI_MEMORY_DESCRIPTOR *MemoryMapStart;
+ UINTN MemoryMapEntryCount;
+ UINTN DescriptorSize;
+ UINTN Index;
+ EDKII_PI_SMM_MEMORY_ATTRIBUTES_TABLE *MemoryAttributesTable;
+
+ SmmGetSystemConfigurationTable (&gEdkiiPiSmmMemoryAttributesTableGuid, (VOID **)&MemoryAttributesTable);
+ if (MemoryAttributesTable == NULL) {
+ DEBUG ((DEBUG_INFO, "MemoryAttributesTable - NULL\n"));
+ return ;
+ }
+
+ DEBUG ((DEBUG_INFO, "MemoryAttributesTable:\n"));
+ DEBUG ((DEBUG_INFO, " Version - 0x%08x\n", MemoryAttributesTable->Version));
+ DEBUG ((DEBUG_INFO, " NumberOfEntries - 0x%08x\n", MemoryAttributesTable->NumberOfEntries));
+ DEBUG ((DEBUG_INFO, " DescriptorSize - 0x%08x\n", MemoryAttributesTable->DescriptorSize));
+
+ MemoryMapEntryCount = MemoryAttributesTable->NumberOfEntries;
+ DescriptorSize = MemoryAttributesTable->DescriptorSize;
+ MemoryMapStart = (EFI_MEMORY_DESCRIPTOR *)(MemoryAttributesTable + 1);
+ MemoryMap = MemoryMapStart;
+ for (Index = 0; Index < MemoryMapEntryCount; Index++) {
+ DEBUG ((DEBUG_INFO, "Entry (0x%x)\n", MemoryMap));
+ DEBUG ((DEBUG_INFO, " Type - 0x%x\n", MemoryMap->Type));
+ DEBUG ((DEBUG_INFO, " PhysicalStart - 0x%016lx\n", MemoryMap->PhysicalStart));
+ DEBUG ((DEBUG_INFO, " VirtualStart - 0x%016lx\n", MemoryMap->VirtualStart));
+ DEBUG ((DEBUG_INFO, " NumberOfPages - 0x%016lx\n", MemoryMap->NumberOfPages));
+ DEBUG ((DEBUG_INFO, " Attribute - 0x%016lx\n", MemoryMap->Attribute));
+ MemoryMap = NEXT_MEMORY_DESCRIPTOR(MemoryMap, DescriptorSize);
+ }
+
+ MemoryMap = MemoryMapStart;
+ for (Index = 0; Index < MemoryMapEntryCount; Index++) {
+ DEBUG ((DEBUG_VERBOSE, "SetAttribute: Memory Entry - 0x%lx, 0x%x\n", MemoryMap->PhysicalStart, MemoryMap->NumberOfPages));
+ switch (MemoryMap->Type) {
+ case EfiRuntimeServicesCode:
+ SmmSetMemoryAttributes (
+ MemoryMap->PhysicalStart,
+ EFI_PAGES_TO_SIZE((UINTN)MemoryMap->NumberOfPages),
+ EFI_MEMORY_RO
+ );
+ break;
+ case EfiRuntimeServicesData:
+ SmmSetMemoryAttributes (
+ MemoryMap->PhysicalStart,
+ EFI_PAGES_TO_SIZE((UINTN)MemoryMap->NumberOfPages),
+ EFI_MEMORY_XP
+ );
+ break;
+ default:
+ SmmSetMemoryAttributes (
+ MemoryMap->PhysicalStart,
+ EFI_PAGES_TO_SIZE((UINTN)MemoryMap->NumberOfPages),
+ EFI_MEMORY_XP
+ );
+ break;
+ }
+ MemoryMap = NEXT_MEMORY_DESCRIPTOR(MemoryMap, DescriptorSize);
+ }
+
+ PatchSmmSaveStateMap ();
+ PatchGdtIdtMap ();
+
+ return ;
+}
+
+/**
+ Sort memory map entries based upon PhysicalStart, from low to high.
+
+ @param MemoryMap A pointer to the buffer in which firmware places
+ the current memory map.
+ @param MemoryMapSize Size, in bytes, of the MemoryMap buffer.
+ @param DescriptorSize Size, in bytes, of an individual EFI_MEMORY_DESCRIPTOR.
+**/
+STATIC
+VOID
+SortMemoryMap (
+ IN OUT EFI_MEMORY_DESCRIPTOR *MemoryMap,
+ IN UINTN MemoryMapSize,
+ IN UINTN DescriptorSize
+ )
+{
+ EFI_MEMORY_DESCRIPTOR *MemoryMapEntry;
+ EFI_MEMORY_DESCRIPTOR *NextMemoryMapEntry;
+ EFI_MEMORY_DESCRIPTOR *MemoryMapEnd;
+ EFI_MEMORY_DESCRIPTOR TempMemoryMap;
+
+ MemoryMapEntry = MemoryMap;
+ NextMemoryMapEntry = NEXT_MEMORY_DESCRIPTOR (MemoryMapEntry, DescriptorSize);
+ MemoryMapEnd = (EFI_MEMORY_DESCRIPTOR *) ((UINT8 *) MemoryMap + MemoryMapSize);
+ while (MemoryMapEntry < MemoryMapEnd) {
+ while (NextMemoryMapEntry < MemoryMapEnd) {
+ if (MemoryMapEntry->PhysicalStart > NextMemoryMapEntry->PhysicalStart) {
+ CopyMem (&TempMemoryMap, MemoryMapEntry, sizeof(EFI_MEMORY_DESCRIPTOR));
+ CopyMem (MemoryMapEntry, NextMemoryMapEntry, sizeof(EFI_MEMORY_DESCRIPTOR));
+ CopyMem (NextMemoryMapEntry, &TempMemoryMap, sizeof(EFI_MEMORY_DESCRIPTOR));
+ }
+
+ NextMemoryMapEntry = NEXT_MEMORY_DESCRIPTOR (NextMemoryMapEntry, DescriptorSize);
+ }
+
+ MemoryMapEntry = NEXT_MEMORY_DESCRIPTOR (MemoryMapEntry, DescriptorSize);
+ NextMemoryMapEntry = NEXT_MEMORY_DESCRIPTOR (MemoryMapEntry, DescriptorSize);
+ }
+}
+
+/**
+ Return whether a UEFI memory page should be marked as not present in the SMM page table.
+ If the memory map entry type is
+ EfiLoaderCode/Data, EfiBootServicesCode/Data, EfiConventionalMemory,
+ EfiUnusableMemory, or EfiACPIReclaimMemory, return TRUE.
+ Otherwise, return FALSE.
+
+ @param[in] MemoryMap A pointer to the memory descriptor.
+
+ @return TRUE The memory described will be marked as not present in SMM page table.
+ @return FALSE The memory described will not be marked as not present in SMM page table.
+**/
+BOOLEAN
+IsUefiPageNotPresent (
+ IN EFI_MEMORY_DESCRIPTOR *MemoryMap
+ )
+{
+ switch (MemoryMap->Type) {
+ case EfiLoaderCode:
+ case EfiLoaderData:
+ case EfiBootServicesCode:
+ case EfiBootServicesData:
+ case EfiConventionalMemory:
+ case EfiUnusableMemory:
+ case EfiACPIReclaimMemory:
+ return TRUE;
+ default:
+ return FALSE;
+ }
+}
+
+/**
+ Merge continuous memory map entries whose type is
+ EfiLoaderCode/Data, EfiBootServicesCode/Data, EfiConventionalMemory,
+ EfiUnusableMemory, EfiACPIReclaimMemory, because the memory described by
+ these entries will be set as NOT present in SMM page table.
+
+ @param[in, out] MemoryMap A pointer to the buffer in which firmware places
+ the current memory map.
+ @param[in, out] MemoryMapSize A pointer to the size, in bytes, of the
+ MemoryMap buffer. On input, this is the size of
+ the current memory map. On output,
+ it is the size of the new memory map after merging.
+ @param[in] DescriptorSize Size, in bytes, of an individual EFI_MEMORY_DESCRIPTOR.
+**/
+STATIC
+VOID
+MergeMemoryMapForNotPresentEntry (
+ IN OUT EFI_MEMORY_DESCRIPTOR *MemoryMap,
+ IN OUT UINTN *MemoryMapSize,
+ IN UINTN DescriptorSize
+ )
+{
+ EFI_MEMORY_DESCRIPTOR *MemoryMapEntry;
+ EFI_MEMORY_DESCRIPTOR *MemoryMapEnd;
+ UINT64 MemoryBlockLength;
+ EFI_MEMORY_DESCRIPTOR *NewMemoryMapEntry;
+ EFI_MEMORY_DESCRIPTOR *NextMemoryMapEntry;
+
+ MemoryMapEntry = MemoryMap;
+ NewMemoryMapEntry = MemoryMap;
+ MemoryMapEnd = (EFI_MEMORY_DESCRIPTOR *) ((UINT8 *) MemoryMap + *MemoryMapSize);
+ while ((UINTN)MemoryMapEntry < (UINTN)MemoryMapEnd) {
+ CopyMem (NewMemoryMapEntry, MemoryMapEntry, sizeof(EFI_MEMORY_DESCRIPTOR));
+ NextMemoryMapEntry = NEXT_MEMORY_DESCRIPTOR (MemoryMapEntry, DescriptorSize);
+
+ do {
+ MemoryBlockLength = (UINT64) (EFI_PAGES_TO_SIZE((UINTN)MemoryMapEntry->NumberOfPages));
+ if (((UINTN)NextMemoryMapEntry < (UINTN)MemoryMapEnd) &&
+ IsUefiPageNotPresent(MemoryMapEntry) && IsUefiPageNotPresent(NextMemoryMapEntry) &&
+ ((MemoryMapEntry->PhysicalStart + MemoryBlockLength) == NextMemoryMapEntry->PhysicalStart)) {
+ MemoryMapEntry->NumberOfPages += NextMemoryMapEntry->NumberOfPages;
+ if (NewMemoryMapEntry != MemoryMapEntry) {
+ NewMemoryMapEntry->NumberOfPages += NextMemoryMapEntry->NumberOfPages;
+ }
+
+ NextMemoryMapEntry = NEXT_MEMORY_DESCRIPTOR (NextMemoryMapEntry, DescriptorSize);
+ continue;
+ } else {
+ MemoryMapEntry = PREVIOUS_MEMORY_DESCRIPTOR (NextMemoryMapEntry, DescriptorSize);
+ break;
+ }
+ } while (TRUE);
+
+ MemoryMapEntry = NEXT_MEMORY_DESCRIPTOR (MemoryMapEntry, DescriptorSize);
+ NewMemoryMapEntry = NEXT_MEMORY_DESCRIPTOR (NewMemoryMapEntry, DescriptorSize);
+ }
+
+ *MemoryMapSize = (UINTN)NewMemoryMapEntry - (UINTN)MemoryMap;
+
+ return ;
+}
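+
+//
+// Worked example (illustrative only): two adjacent entries
+//   { EfiBootServicesData,   PhysicalStart = 0x1000, NumberOfPages = 1 }
+//   { EfiConventionalMemory, PhysicalStart = 0x2000, NumberOfPages = 2 }
+// both satisfy IsUefiPageNotPresent() and are physically contiguous, so they
+// collapse into a single three-page entry at 0x1000 and *MemoryMapSize
+// shrinks by one descriptor.
+//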
+
+/**
+ This function caches the GCD memory map information.
+**/
+VOID
+GetGcdMemoryMap (
+ VOID
+ )
+{
+ UINTN NumberOfDescriptors;
+ EFI_GCD_MEMORY_SPACE_DESCRIPTOR *MemSpaceMap;
+ EFI_STATUS Status;
+ UINTN Index;
+
+ Status = gDS->GetMemorySpaceMap (&NumberOfDescriptors, &MemSpaceMap);
+ if (EFI_ERROR (Status)) {
+ return ;
+ }
+
+ mGcdMemNumberOfDesc = 0;
+ for (Index = 0; Index < NumberOfDescriptors; Index++) {
+ if (MemSpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeReserved &&
+ (MemSpaceMap[Index].Capabilities & (EFI_MEMORY_PRESENT | EFI_MEMORY_INITIALIZED | EFI_MEMORY_TESTED)) ==
+ (EFI_MEMORY_PRESENT | EFI_MEMORY_INITIALIZED)
+ ) {
+ mGcdMemNumberOfDesc++;
+ }
+ }
+
+ mGcdMemSpace = AllocateZeroPool (mGcdMemNumberOfDesc * sizeof (EFI_GCD_MEMORY_SPACE_DESCRIPTOR));
+ ASSERT (mGcdMemSpace != NULL);
+ if (mGcdMemSpace == NULL) {
+ mGcdMemNumberOfDesc = 0;
+ gBS->FreePool (MemSpaceMap);
+ return ;
+ }
+
+ mGcdMemNumberOfDesc = 0;
+ for (Index = 0; Index < NumberOfDescriptors; Index++) {
+ if (MemSpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeReserved &&
+ (MemSpaceMap[Index].Capabilities & (EFI_MEMORY_PRESENT | EFI_MEMORY_INITIALIZED | EFI_MEMORY_TESTED)) ==
+ (EFI_MEMORY_PRESENT | EFI_MEMORY_INITIALIZED)
+ ) {
+ CopyMem (
+ &mGcdMemSpace[mGcdMemNumberOfDesc],
+ &MemSpaceMap[Index],
+ sizeof(EFI_GCD_MEMORY_SPACE_DESCRIPTOR)
+ );
+ mGcdMemNumberOfDesc++;
+ }
+ }
+
+ gBS->FreePool (MemSpaceMap);
+}
+
+/**
+ Get UEFI MemoryAttributesTable.
+**/
+VOID
+GetUefiMemoryAttributesTable (
+ VOID
+ )
+{
+ EFI_STATUS Status;
+ EFI_MEMORY_ATTRIBUTES_TABLE *MemoryAttributesTable;
+ UINTN MemoryAttributesTableSize;
+
+ Status = EfiGetSystemConfigurationTable (&gEfiMemoryAttributesTableGuid, (VOID **)&MemoryAttributesTable);
+ if (!EFI_ERROR (Status) && (MemoryAttributesTable != NULL)) {
+ MemoryAttributesTableSize = sizeof(EFI_MEMORY_ATTRIBUTES_TABLE) + MemoryAttributesTable->DescriptorSize * MemoryAttributesTable->NumberOfEntries;
+ mUefiMemoryAttributesTable = AllocateCopyPool (MemoryAttributesTableSize, MemoryAttributesTable);
+ ASSERT (mUefiMemoryAttributesTable != NULL);
+ }
+}
+
+/**
+ This function caches the UEFI memory map information.
+**/
+VOID
+GetUefiMemoryMap (
+ VOID
+ )
+{
+ EFI_STATUS Status;
+ UINTN MapKey;
+ UINT32 DescriptorVersion;
+ EFI_MEMORY_DESCRIPTOR *MemoryMap;
+ UINTN UefiMemoryMapSize;
+
+ DEBUG ((DEBUG_INFO, "GetUefiMemoryMap\n"));
+
+ UefiMemoryMapSize = 0;
+ MemoryMap = NULL;
+ Status = gBS->GetMemoryMap (
+ &UefiMemoryMapSize,
+ MemoryMap,
+ &MapKey,
+ &mUefiDescriptorSize,
+ &DescriptorVersion
+ );
+ ASSERT (Status == EFI_BUFFER_TOO_SMALL);
+
+ do {
+ Status = gBS->AllocatePool (EfiBootServicesData, UefiMemoryMapSize, (VOID **)&MemoryMap);
+ ASSERT (MemoryMap != NULL);
+ if (MemoryMap == NULL) {
+ return ;
+ }
+
+ Status = gBS->GetMemoryMap (
+ &UefiMemoryMapSize,
+ MemoryMap,
+ &MapKey,
+ &mUefiDescriptorSize,
+ &DescriptorVersion
+ );
+ if (EFI_ERROR (Status)) {
+ gBS->FreePool (MemoryMap);
+ MemoryMap = NULL;
+ }
+ } while (Status == EFI_BUFFER_TOO_SMALL);
+
+ if (MemoryMap == NULL) {
+ return ;
+ }
+
+ SortMemoryMap (MemoryMap, UefiMemoryMapSize, mUefiDescriptorSize);
+ MergeMemoryMapForNotPresentEntry (MemoryMap, &UefiMemoryMapSize, mUefiDescriptorSize);
+
+ mUefiMemoryMapSize = UefiMemoryMapSize;
+ mUefiMemoryMap = AllocateCopyPool (UefiMemoryMapSize, MemoryMap);
+ ASSERT (mUefiMemoryMap != NULL);
+
+ gBS->FreePool (MemoryMap);
+
+ //
+ // Get additional information from GCD memory map.
+ //
+ GetGcdMemoryMap ();
+
+ //
+ // Get UEFI memory attributes table.
+ //
+ GetUefiMemoryAttributesTable ();
+}
+
+/**
+ This function sets UEFI memory attributes according to the UEFI memory map.
+
+ Normal memory regions are marked as not present, such as
+ EfiLoaderCode/Data, EfiBootServicesCode/Data, EfiConventionalMemory,
+ EfiUnusableMemory, EfiACPIReclaimMemory.
+**/
+VOID
+SetUefiMemMapAttributes (
+ VOID
+ )
+{
+ EFI_STATUS Status;
+ EFI_MEMORY_DESCRIPTOR *MemoryMap;
+ UINTN MemoryMapEntryCount;
+ UINTN Index;
+ EFI_MEMORY_DESCRIPTOR *Entry;
+
+ DEBUG ((DEBUG_INFO, "SetUefiMemMapAttributes\n"));
+
+ if (mUefiMemoryMap != NULL) {
+ MemoryMapEntryCount = mUefiMemoryMapSize/mUefiDescriptorSize;
+ MemoryMap = mUefiMemoryMap;
+ for (Index = 0; Index < MemoryMapEntryCount; Index++) {
+ if (IsUefiPageNotPresent(MemoryMap)) {
+ Status = SmmSetMemoryAttributes (
+ MemoryMap->PhysicalStart,
+ EFI_PAGES_TO_SIZE((UINTN)MemoryMap->NumberOfPages),
+ EFI_MEMORY_RP
+ );
+ DEBUG ((
+ DEBUG_INFO,
+ "UefiMemory protection: 0x%lx - 0x%lx %r\n",
+ MemoryMap->PhysicalStart,
+ MemoryMap->PhysicalStart + (UINT64)EFI_PAGES_TO_SIZE((UINTN)MemoryMap->NumberOfPages),
+ Status
+ ));
+ }
+ MemoryMap = NEXT_MEMORY_DESCRIPTOR(MemoryMap, mUefiDescriptorSize);
+ }
+ }
+ //
+ // Do not free mUefiMemoryMap, it will be checked in IsSmmCommBufferForbiddenAddress().
+ //
+
+ //
+ // Set untested memory as not present.
+ //
+ if (mGcdMemSpace != NULL) {
+ for (Index = 0; Index < mGcdMemNumberOfDesc; Index++) {
+ Status = SmmSetMemoryAttributes (
+ mGcdMemSpace[Index].BaseAddress,
+ mGcdMemSpace[Index].Length,
+ EFI_MEMORY_RP
+ );
+ DEBUG ((
+ DEBUG_INFO,
+ "GcdMemory protection: 0x%lx - 0x%lx %r\n",
+ mGcdMemSpace[Index].BaseAddress,
+ mGcdMemSpace[Index].BaseAddress + mGcdMemSpace[Index].Length,
+ Status
+ ));
+ }
+ }
+ //
+ // Do not free mGcdMemSpace, it will be checked in IsSmmCommBufferForbiddenAddress().
+ //
+
+ //
+ // Set UEFI runtime memory with EFI_MEMORY_RO as not present.
+ //
+ if (mUefiMemoryAttributesTable != NULL) {
+ Entry = (EFI_MEMORY_DESCRIPTOR *)(mUefiMemoryAttributesTable + 1);
+ for (Index = 0; Index < mUefiMemoryAttributesTable->NumberOfEntries; Index++) {
+ if (Entry->Type == EfiRuntimeServicesCode || Entry->Type == EfiRuntimeServicesData) {
+ if ((Entry->Attribute & EFI_MEMORY_RO) != 0) {
+ Status = SmmSetMemoryAttributes (
+ Entry->PhysicalStart,
+ EFI_PAGES_TO_SIZE((UINTN)Entry->NumberOfPages),
+ EFI_MEMORY_RP
+ );
+ DEBUG ((
+ DEBUG_INFO,
+ "UefiMemoryAttribute protection: 0x%lx - 0x%lx %r\n",
+ Entry->PhysicalStart,
+ Entry->PhysicalStart + (UINT64)EFI_PAGES_TO_SIZE((UINTN)Entry->NumberOfPages),
+ Status
+ ));
+ }
+ }
+ Entry = NEXT_MEMORY_DESCRIPTOR (Entry, mUefiMemoryAttributesTable->DescriptorSize);
+ }
+ }
+ //
+ // Do not free mUefiMemoryAttributesTable, it will be checked in IsSmmCommBufferForbiddenAddress().
+ //
+}
+
+/**
+ Return if the Address is forbidden as SMM communication buffer.
+
+ @param[in] Address the address to be checked
+
+ @return TRUE The address is forbidden as SMM communication buffer.
+ @return FALSE The address is allowed as SMM communication buffer.
+**/
+BOOLEAN
+IsSmmCommBufferForbiddenAddress (
+ IN UINT64 Address
+ )
+{
+ EFI_MEMORY_DESCRIPTOR *MemoryMap;
+ UINTN MemoryMapEntryCount;
+ UINTN Index;
+ EFI_MEMORY_DESCRIPTOR *Entry;
+
+ if (mUefiMemoryMap != NULL) {
+ MemoryMap = mUefiMemoryMap;
+ MemoryMapEntryCount = mUefiMemoryMapSize/mUefiDescriptorSize;
+ for (Index = 0; Index < MemoryMapEntryCount; Index++) {
+ if (IsUefiPageNotPresent (MemoryMap)) {
+ if ((Address >= MemoryMap->PhysicalStart) &&
+ (Address < MemoryMap->PhysicalStart + EFI_PAGES_TO_SIZE((UINTN)MemoryMap->NumberOfPages)) ) {
+ return TRUE;
+ }
+ }
+ MemoryMap = NEXT_MEMORY_DESCRIPTOR(MemoryMap, mUefiDescriptorSize);
+ }
+ }
+
+ if (mGcdMemSpace != NULL) {
+ for (Index = 0; Index < mGcdMemNumberOfDesc; Index++) {
+ if ((Address >= mGcdMemSpace[Index].BaseAddress) &&
+ (Address < mGcdMemSpace[Index].BaseAddress + mGcdMemSpace[Index].Length) ) {
+ return TRUE;
+ }
+ }
+ }
+
+ if (mUefiMemoryAttributesTable != NULL) {
+ Entry = (EFI_MEMORY_DESCRIPTOR *)(mUefiMemoryAttributesTable + 1);
+ for (Index = 0; Index < mUefiMemoryAttributesTable->NumberOfEntries; Index++) {
+ if (Entry->Type == EfiRuntimeServicesCode || Entry->Type == EfiRuntimeServicesData) {
+ if ((Entry->Attribute & EFI_MEMORY_RO) != 0) {
+ if ((Address >= Entry->PhysicalStart) &&
+ (Address < Entry->PhysicalStart + LShiftU64 (Entry->NumberOfPages, EFI_PAGE_SHIFT))) {
+ return TRUE;
+ }
+ }
+ }
+ //
+ // Advance unconditionally; keeping this advance inside the attribute check
+ // would stall the walk on the first descriptor that fails the checks.
+ //
+ Entry = NEXT_MEMORY_DESCRIPTOR (Entry, mUefiMemoryAttributesTable->DescriptorSize);
+ }
+ }
+ return FALSE;
+}
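+
+//
+// Illustrative sketch only (hypothetical helper, not part of this driver):
+// before an SMI handler consumes a communication buffer, each 4KB page of the
+// buffer can be screened with IsSmmCommBufferForbiddenAddress().
+//
+STATIC
+BOOLEAN
+IsCommBufferRangeAllowedSketch (
+ IN UINT64 Buffer,
+ IN UINT64 Length
+ )
+{
+ UINT64 Address;
+
+ for (Address = Buffer & ~(UINT64)(SIZE_4KB - 1); Address < Buffer + Length; Address += SIZE_4KB) {
+ if (IsSmmCommBufferForbiddenAddress (Address)) {
+ return FALSE; // at least one page of the buffer is forbidden
+ }
+ }
+ return TRUE;
+}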
+
+/**
+ This function sets the given attributes of the memory region specified by
+ BaseAddress and Length.
+
+ @param This The EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL instance.
+ @param BaseAddress The physical address that is the start address of
+ a memory region.
+ @param Length The size in bytes of the memory region.
+ @param Attributes The bit mask of attributes to set for the memory
+ region.
+
+ @retval EFI_SUCCESS The attributes were set for the memory region.
+ @retval EFI_INVALID_PARAMETER Length is zero.
+ Attributes specified an illegal combination of
+ attributes that cannot be set together.
+ @retval EFI_UNSUPPORTED The processor does not support one or more
+ bytes of the memory resource range specified
+ by BaseAddress and Length.
+ The bit mask of attributes is not supported for
+ the memory resource range specified by
+ BaseAddress and Length.
+
+**/
+EFI_STATUS
+EFIAPI
+EdkiiSmmSetMemoryAttributes (
+ IN EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL *This,
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes
+ )
+{
+ return SmmSetMemoryAttributes (BaseAddress, Length, Attributes);
+}
+
+/**
+ This function clears the given attributes of the memory region specified by
+ BaseAddress and Length.
+
+ @param This The EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL instance.
+ @param BaseAddress The physical address that is the start address of
+ a memory region.
+ @param Length The size in bytes of the memory region.
+ @param Attributes The bit mask of attributes to clear for the memory
+ region.
+
+ @retval EFI_SUCCESS The attributes were cleared for the memory region.
+ @retval EFI_INVALID_PARAMETER Length is zero.
+ Attributes specified an illegal combination of
+ attributes that cannot be cleared together.
+ @retval EFI_UNSUPPORTED The processor does not support one or more
+ bytes of the memory resource range specified
+ by BaseAddress and Length.
+ The bit mask of attributes is not supported for
+ the memory resource range specified by
+ BaseAddress and Length.
+
+**/
+EFI_STATUS
+EFIAPI
+EdkiiSmmClearMemoryAttributes (
+ IN EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL *This,
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes
+ )
+{
+ return SmmClearMemoryAttributes (BaseAddress, Length, Attributes);
+}
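+
+//
+// Illustrative usage sketch (hypothetical function and address, not part of
+// this driver): a consumer locates EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL and
+// toggles the XP attribute on one page.
+//
+STATIC
+VOID
+MemoryAttributeUsageSketch (
+ VOID
+ )
+{
+ EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL *MemAttribute;
+ EFI_PHYSICAL_ADDRESS Buffer;
+ EFI_STATUS Status;
+
+ Buffer = 0x100000; // hypothetical page-aligned address
+ Status = gSmst->SmmLocateProtocol (&gEdkiiSmmMemoryAttributeProtocolGuid, NULL, (VOID **)&MemAttribute);
+ if (EFI_ERROR (Status)) {
+ return;
+ }
+ //
+ // Make the page non-executable, then lift the restriction again.
+ //
+ Status = MemAttribute->SetMemoryAttributes (MemAttribute, Buffer, SIZE_4KB, EFI_MEMORY_XP);
+ Status = MemAttribute->ClearMemoryAttributes (MemAttribute, Buffer, SIZE_4KB, EFI_MEMORY_XP);
+}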
+
+/**
+ This function retrieves the attributes of the memory region specified by
+ BaseAddress and Length. If different attributes are returned from different
+ parts of the memory region, EFI_NO_MAPPING will be returned.
+
+ @param This The EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL instance.
+ @param BaseAddress The physical address that is the start address of
+ a memory region.
+ @param Length The size in bytes of the memory region.
+ @param Attributes Pointer to attributes returned.
+
+ @retval EFI_SUCCESS The attributes were returned for the memory region.
+ @retval EFI_INVALID_PARAMETER Length is zero.
+ Attributes is NULL.
+ @retval EFI_NO_MAPPING Attributes are not consistent across the memory
+ region.
+ @retval EFI_UNSUPPORTED The processor does not support one or more
+ bytes of the memory resource range specified
+ by BaseAddress and Length.
+
+**/
+EFI_STATUS
+EFIAPI
+EdkiiSmmGetMemoryAttributes (
+ IN EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL *This,
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ OUT UINT64 *Attributes
+ )
+{
+ EFI_PHYSICAL_ADDRESS Address;
+ UINT64 *PageEntry;
+ UINT64 MemAttr;
+ PAGE_ATTRIBUTE PageAttr;
+ INT64 Size;
+
+ if (Length < SIZE_4KB || Attributes == NULL) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ Size = (INT64)Length;
+ MemAttr = (UINT64)-1;
+
+ do {
+
+ PageEntry = GetPageTableEntry (BaseAddress, &PageAttr);
+ if (PageEntry == NULL || PageAttr == PageNone) {
+ return EFI_UNSUPPORTED;
+ }
+
+ //
+ // If the memory range is cross page table boundary, make sure they
+ // share the same attribute. Return EFI_NO_MAPPING if not.
+ //
+ *Attributes = GetAttributesFromPageEntry (PageEntry);
+ if (MemAttr != (UINT64)-1 && *Attributes != MemAttr) {
+ return EFI_NO_MAPPING;
+ }
+
+ switch (PageAttr) {
+ case Page4K:
+ Address = *PageEntry & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64;
+ Size -= (SIZE_4KB - (BaseAddress - Address));
+ BaseAddress += (SIZE_4KB - (BaseAddress - Address));
+ break;
+
+ case Page2M:
+ Address = *PageEntry & ~mAddressEncMask & PAGING_2M_ADDRESS_MASK_64;
+ Size -= SIZE_2MB - (BaseAddress - Address);
+ BaseAddress += SIZE_2MB - (BaseAddress - Address);
+ break;
+
+ case Page1G:
+ Address = *PageEntry & ~mAddressEncMask & PAGING_1G_ADDRESS_MASK_64;
+ Size -= SIZE_1GB - (BaseAddress - Address);
+ BaseAddress += SIZE_1GB - (BaseAddress - Address);
+ break;
+
+ default:
+ return EFI_UNSUPPORTED;
+ }
+
+ MemAttr = *Attributes;
+
+ } while (Size > 0);
+
+ return EFI_SUCCESS;
+}
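+
+//
+// Illustrative usage sketch (hypothetical caller, not part of this driver):
+// querying attributes and handling the EFI_NO_MAPPING case, which signals
+// that the range spans pages with differing attributes.
+//
+STATIC
+VOID
+GetMemoryAttributesUsageSketch (
+ IN EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL *MemAttribute,
+ IN EFI_PHYSICAL_ADDRESS Base,
+ IN UINT64 Length
+ )
+{
+ UINT64 Attributes;
+ EFI_STATUS Status;
+
+ Status = MemAttribute->GetMemoryAttributes (MemAttribute, Base, Length, &Attributes);
+ if (Status == EFI_NO_MAPPING) {
+ //
+ // Attributes differ across the range; re-query in smaller (e.g. 4KB)
+ // chunks to find where they change.
+ //
+ } else if (!EFI_ERROR (Status) && ((Attributes & EFI_MEMORY_RO) != 0)) {
+ DEBUG ((DEBUG_INFO, "Range 0x%lx is mapped read-only\n", Base));
+ }
+}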
+
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmMp.c b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmMp.c
new file mode 100644
index 00000000..ca058855
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmMp.c
@@ -0,0 +1,344 @@
+/** @file
+SMM MP protocol implementation
+
+Copyright (c) 2019, Intel Corporation. All rights reserved.<BR>
+
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+#include "SmmMp.h"
+
+///
+/// SMM MP Protocol instance
+///
+EFI_MM_MP_PROTOCOL mSmmMp = {
+ EFI_MM_MP_PROTOCOL_REVISION,
+ 0,
+ SmmMpGetNumberOfProcessors,
+ SmmMpDispatchProcedure,
+ SmmMpBroadcastProcedure,
+ SmmMpSetStartupProcedure,
+ SmmMpCheckForProcedure,
+ SmmMpWaitForProcedure
+};
+
+/**
+ Service to retrieve the number of logical processors in the platform.
+
+ @param[in] This The EFI_MM_MP_PROTOCOL instance.
+ @param[out] NumberOfProcessors Pointer to the total number of logical processors in the system,
+ including the BSP and all APs.
+
+ @retval EFI_SUCCESS The number of processors was retrieved successfully
+ @retval EFI_INVALID_PARAMETER NumberOfProcessors is NULL
+**/
+EFI_STATUS
+EFIAPI
+SmmMpGetNumberOfProcessors (
+ IN CONST EFI_MM_MP_PROTOCOL *This,
+ OUT UINTN *NumberOfProcessors
+ )
+{
+ if (NumberOfProcessors == NULL) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ *NumberOfProcessors = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
+
+ return EFI_SUCCESS;
+}
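+
+//
+// Illustrative usage sketch (hypothetical consumer, not part of this driver):
+// another MM driver locates the protocol published above and queries the
+// processor count.
+//
+STATIC
+VOID
+MmMpUsageSketch (
+ VOID
+ )
+{
+ EFI_MM_MP_PROTOCOL *MmMp;
+ UINTN CpuCount;
+ EFI_STATUS Status;
+
+ Status = gSmst->SmmLocateProtocol (&gEfiMmMpProtocolGuid, NULL, (VOID **)&MmMp);
+ if (!EFI_ERROR (Status)) {
+ Status = MmMp->GetNumberOfProcessors (MmMp, &CpuCount);
+ DEBUG ((DEBUG_INFO, "MM MP reports %d processors\n", (UINT32)CpuCount));
+ }
+}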
+
+/**
+ This service allows the caller to invoke a procedure on one of the application processors (APs). This
+ function uses an optional token parameter to support blocking and non-blocking modes. If the token
+ is passed into the call, the function will operate in a non-blocking fashion and the caller can
+ check for completion with CheckOnProcedure or WaitForProcedure.
+
+ @param[in] This The EFI_MM_MP_PROTOCOL instance.
+ @param[in] Procedure A pointer to the procedure to be run on the designated target
+ AP of the system. Type EFI_AP_PROCEDURE2 is defined below in
+ related definitions.
+ @param[in] CpuNumber The zero-based index of the processor number of the target
+ AP, on which the code stream is supposed to run. If the number
+ points to the calling processor then it will not run the
+ supplied code.
+ @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for this AP to
+ finish execution of Procedure, either for blocking or
+ non-blocking mode. Zero means infinity. If the timeout
+ expires before this AP returns from Procedure, then Procedure
+ on the AP is terminated. If the timeout expires in blocking
+ mode, the call returns EFI_TIMEOUT. If the timeout expires
+ in non-blocking mode, the timeout can be determined through
+ CheckOnProcedure or WaitForProcedure.
+ Note that timeout support is optional. Whether an
+ implementation supports this feature, can be determined via
+ the Attributes data member.
+ @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code
+ that is run by the AP. It is an optional common mailbox
+ between APs and the caller to share information.
+ @param[in,out] Token This parameter is broken into two components:
+ 1.Token->Completion is an optional parameter that allows the
+ caller to execute the procedure in a blocking or non-blocking
+ fashion. If it is NULL the call is blocking, and the call will
+ not return until the AP has completed the procedure. If the
+ token is not NULL, the call will return immediately. The caller
+ can check whether the procedure has completed with
+ CheckOnProcedure or WaitForProcedure.
+ 2.Token->Status The implementation updates the address pointed
+ at by this variable with the status code returned by Procedure
+ when it completes execution on the target AP, or with EFI_TIMEOUT
+ if the Procedure fails to complete within the optional timeout.
+ The implementation will update this variable with EFI_NOT_READY
+ prior to starting Procedure on the target AP
+ @param[in,out] CPUStatus This optional pointer may be used to get the status code returned
+ by Procedure when it completes execution on the target AP, or with
+ EFI_TIMEOUT if the Procedure fails to complete within the optional
+ timeout. The implementation will update this variable with
+ EFI_NOT_READY prior to starting Procedure on the target AP.
+
+ @retval EFI_SUCCESS In the blocking case, this indicates that Procedure has completed
+ execution on the target AP.
+ In the non-blocking case this indicates that the procedure has
+ been successfully scheduled for execution on the target AP.
+ @retval EFI_INVALID_PARAMETER The input arguments are out of range. Either the target AP is the
+ caller of the function, or the Procedure or Token is NULL
+ @retval EFI_NOT_READY If the target AP is busy executing another procedure
+ @retval EFI_ALREADY_STARTED Token is already in use for another procedure
+ @retval EFI_TIMEOUT In blocking mode, the timeout expired before the specified AP
+ has finished
+ @retval EFI_OUT_OF_RESOURCES Could not allocate a required resource.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmMpDispatchProcedure (
+ IN CONST EFI_MM_MP_PROTOCOL *This,
+ IN EFI_AP_PROCEDURE2 Procedure,
+ IN UINTN CpuNumber,
+ IN UINTN TimeoutInMicroseconds,
+ IN OUT VOID *ProcedureArguments OPTIONAL,
+ IN OUT MM_COMPLETION *Token,
+ IN OUT EFI_STATUS *CPUStatus
+ )
+{
+ return InternalSmmStartupThisAp (
+ Procedure,
+ CpuNumber,
+ ProcedureArguments,
+ Token,
+ TimeoutInMicroseconds,
+ CPUStatus
+ );
+}
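+
+//
+// Illustrative usage sketch (hypothetical procedure and CPU index, not part of
+// this driver): a non-blocking dispatch hands a token back to the caller, who
+// later blocks in WaitForProcedure.
+//
+STATIC
+EFI_STATUS
+EFIAPI
+ExampleApWorkSketch (
+ IN VOID *Buffer
+ )
+{
+ //
+ // Runs on the target AP inside MM; Buffer is the optional shared mailbox.
+ //
+ return EFI_SUCCESS;
+}
+
+STATIC
+VOID
+DispatchUsageSketch (
+ IN EFI_MM_MP_PROTOCOL *MmMp
+ )
+{
+ MM_COMPLETION Token;
+ EFI_STATUS ApStatus;
+ EFI_STATUS Status;
+
+ Status = MmMp->DispatchProcedure (MmMp, ExampleApWorkSketch, 1, 0, NULL, &Token, &ApStatus);
+ if (!EFI_ERROR (Status)) {
+ //
+ // Do other BSP work here, then block until the AP finishes; ApStatus then
+ // holds the status returned by ExampleApWorkSketch.
+ //
+ Status = MmMp->WaitForProcedure (MmMp, Token);
+ }
+}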
+
+/**
+ This service allows the caller to invoke a procedure on all running application processors (APs)
+ except the caller. This function uses an optional token parameter to support blocking and
+ nonblocking modes. If the token is passed into the call, the function will operate in a non-blocking
+ fashion and the caller can check for completion with CheckOnProcedure or WaitForProcedure.
+
+ It is not necessary for the implementation to run the procedure on every processor on the platform.
+ Processors that are powered down in such a way that they cannot respond to interrupts may be
+ excluded from the broadcast.
+
+
+ @param[in] This The EFI_MM_MP_PROTOCOL instance.
+ @param[in] Procedure A pointer to the code stream to be run on the APs that have
+ entered MM. Type EFI_AP_PROCEDURE is defined below in related
+ definitions.
+ @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for the APs to finish
+ execution of Procedure, either for blocking or non-blocking mode.
+ Zero means infinity. If the timeout expires before all APs return
+ from Procedure, then Procedure on the failed APs is terminated. If
+ the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
+ If the timeout expires in non-blocking mode, the timeout can be
+ determined through CheckOnProcedure or WaitForProcedure.
+ Note that timeout support is optional. Whether an implementation
+ supports this feature can be determined via the Attributes data
+ member.
+ @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code
+ that is run by the AP. It is an optional common mailbox
+ between APs and the caller to share information.
+ @param[in,out] Token This parameter is broken into two components:
+ 1.Token->Completion is an optional parameter that allows the
+ caller to execute the procedure in a blocking or non-blocking
+ fashion. If it is NULL the call is blocking, and the call will
+ not return until the AP has completed the procedure. If the
+ token is not NULL, the call will return immediately. The caller
+ can check whether the procedure has completed with
+ CheckOnProcedure or WaitForProcedure.
+ 2.Token->Status The implementation updates the address pointed
+ at by this variable with the status code returned by Procedure
+ when it completes execution on the target AP, or with EFI_TIMEOUT
+ if the Procedure fails to complete within the optional timeout.
+ The implementation will update this variable with EFI_NOT_READY
+ prior to starting Procedure on the target AP
+ @param[in,out] CPUStatus This optional pointer may be used to get the individual status
+ returned by every AP that participated in the broadcast. This
+ parameter, if used, provides the base address of an array to hold
+ the EFI_STATUS value of each AP in the system. The size of the
+ array can be ascertained by the GetNumberOfProcessors function.
+ As mentioned above, the broadcast may not include every processor
+ in the system. Some implementations may exclude processors that
+ have been powered down in such a way that they are not responsive
+ to interrupts. Additionally the broadcast excludes the processor
+ which is making the BroadcastProcedure call. For every excluded
+ processor, the array entry must contain a value of EFI_NOT_STARTED
+
+ @retval EFI_SUCCESS In the blocking case, this indicates that Procedure has completed
+ execution on the APs.
+ In the non-blocking case this indicates that the procedure has
+ been successfully scheduled for execution on the APs.
+ @retval EFI_INVALID_PARAMETER The Procedure or Token is NULL
+ @retval EFI_NOT_READY If the target AP is busy executing another procedure
+ @retval EFI_ALREADY_STARTED Token is already in use for another procedure
+ @retval EFI_TIMEOUT In blocking mode, the timeout expired before the specified AP
+ has finished.
+ @retval EFI_OUT_OF_RESOURCES Could not allocate a required resource.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmMpBroadcastProcedure (
+ IN CONST EFI_MM_MP_PROTOCOL *This,
+ IN EFI_AP_PROCEDURE2 Procedure,
+ IN UINTN TimeoutInMicroseconds,
+ IN OUT VOID *ProcedureArguments OPTIONAL,
+ IN OUT MM_COMPLETION *Token,
+ IN OUT EFI_STATUS *CPUStatus
+ )
+{
+ return InternalSmmStartupAllAPs(
+ Procedure,
+ TimeoutInMicroseconds,
+ ProcedureArguments,
+ Token,
+ CPUStatus
+ );
+}
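+
+//
+// Illustrative usage sketch (not part of this driver), reusing
+// ExampleApWorkSketch from the sketch above: a blocking broadcast with a
+// per-processor status array sized via GetNumberOfProcessors().
+//
+STATIC
+VOID
+BroadcastUsageSketch (
+ IN EFI_MM_MP_PROTOCOL *MmMp
+ )
+{
+ UINTN CpuCount;
+ EFI_STATUS *StatusArray;
+ EFI_STATUS Status;
+
+ MmMp->GetNumberOfProcessors (MmMp, &CpuCount);
+ StatusArray = AllocatePool (CpuCount * sizeof (EFI_STATUS));
+ if (StatusArray == NULL) {
+ return;
+ }
+ //
+ // A NULL token makes the call blocking; excluded processors (including the
+ // caller) report EFI_NOT_STARTED in StatusArray.
+ //
+ Status = MmMp->BroadcastProcedure (MmMp, ExampleApWorkSketch, 0, NULL, NULL, StatusArray);
+ DEBUG ((DEBUG_INFO, "Broadcast result: %r\n", Status));
+ FreePool (StatusArray);
+}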
+
+/**
+ This service allows the caller to set a startup procedure that will be executed when an AP powers
+ up from a state where core configuration and context is lost. The procedure's execution has the
+ following properties:
+ 1. The procedure executes before the processor is handed over to the operating system.
+ 2. All processors execute the same startup procedure.
+ 3. The procedure may run in parallel with other procedures invoked through the functions in this
+ protocol, or with processors that are executing an MM handler or running in the operating system.
+
+
+ @param[in] This The EFI_MM_MP_PROTOCOL instance.
+ @param[in] Procedure A pointer to the code stream to be run on the designated target AP
+ of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2
+ with the related definitions of
+ EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
+ The caller may pass a value of NULL to deregister any existing
+ startup procedure.
+ @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code that is
+ run by the AP. It is an optional common mailbox between APs and
+ the caller to share information
+
+ @retval EFI_SUCCESS The Procedure has been set successfully.
+ @retval EFI_INVALID_PARAMETER The Procedure is NULL but ProcedureArguments not NULL.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmMpSetStartupProcedure (
+ IN CONST EFI_MM_MP_PROTOCOL *This,
+ IN EFI_AP_PROCEDURE Procedure,
+ IN OUT VOID *ProcedureArguments OPTIONAL
+ )
+{
+ return RegisterStartupProcedure (Procedure, ProcedureArguments);
+}
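+
+//
+// Illustrative usage sketch (hypothetical procedure, not part of this driver):
+// registering a startup procedure; passing a NULL Procedure later deregisters
+// it again.
+//
+STATIC
+VOID
+EFIAPI
+ExampleStartupSketch (
+ IN OUT VOID *Buffer
+ )
+{
+ //
+ // Re-program per-core state that is lost across the power transition.
+ //
+}
+
+STATIC
+VOID
+StartupProcedureUsageSketch (
+ IN EFI_MM_MP_PROTOCOL *MmMp
+ )
+{
+ MmMp->SetStartupProcedure (MmMp, ExampleStartupSketch, NULL);
+}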
+
+/**
+ When non-blocking execution of a procedure on an AP is invoked with DispatchProcedure,
+ via the use of a token, this function can be used to check for completion of the procedure on the AP.
+ The function takes the token that was passed into the DispatchProcedure call. If the procedure
+ is complete, and therefore it is now possible to run another procedure on the same AP, this function
+ returns EFI_SUCCESS. In this case the status returned by the procedure that executed on the AP is
+ returned in the token's Status field. If the procedure has not yet completed, then this function
+ returns EFI_NOT_READY.
+
+ When a non-blocking execution of a procedure is invoked with BroadcastProcedure, via the
+ use of a token, this function can be used to check for completion of the procedure on all the
+ broadcast APs. The function takes the token that was passed into the BroadcastProcedure
+ call. If the procedure is complete on all broadcast APs this function returns EFI_SUCCESS. In this
+ case the Status field in the token passed into the function reflects the overall result of the
+ invocation, which may be EFI_SUCCESS, if all executions succeeded, or the first observed failure.
+ If the procedure has not yet completed on the broadcast APs, the function returns
+ EFI_NOT_READY.
+
+ @param[in] This The EFI_MM_MP_PROTOCOL instance.
+ @param[in] Token This parameter describes the token that was passed into
+ DispatchProcedure or BroadcastProcedure.
+
+ @retval EFI_SUCCESS Procedure has completed.
+ @retval EFI_NOT_READY The Procedure has not completed.
+ @retval EFI_INVALID_PARAMETER Token or Token->Completion is NULL
+ @retval EFI_NOT_FOUND Token is not currently in use for a non-blocking call
+
+**/
+EFI_STATUS
+EFIAPI
+SmmMpCheckForProcedure (
+ IN CONST EFI_MM_MP_PROTOCOL *This,
+ IN MM_COMPLETION Token
+ )
+{
+ if (Token == NULL) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ if (!IsTokenInUse ((SPIN_LOCK *)Token)) {
+ return EFI_NOT_FOUND;
+ }
+
+ return IsApReady ((SPIN_LOCK *)Token);
+}
+
+/**
+ When a non-blocking execution of a procedure on an AP is invoked via DispatchProcedure,
+ this function will block the caller until the remote procedure has completed on the designated AP.
+ The non-blocking procedure invocation is identified by the Token parameter, which must match the
+ token that was used when DispatchProcedure was called. Upon completion the status returned by
+ the procedure that executed on the AP is used to update the token's Status field.
+
+ When a non-blocking execution of a procedure on an AP is invoked via BroadcastProcedure
+ this function will block the caller until the remote procedure has completed on all of the APs that
+ entered MM. The non-blocking procedure invocation is identified by the Token parameter, which
+ must match the token that was used when BroadcastProcedure was called. Upon completion the
+ overall status returned by the procedures that executed on the broadcast APs is used to update the
+ token's Status field. The overall status may be EFI_SUCCESS, if all executions succeeded, or the
+ first observed failure.
+
+
+ @param[in] This The EFI_MM_MP_PROTOCOL instance.
+ @param[in] Token This parameter describes the token that was passed into
+ DispatchProcedure or BroadcastProcedure.
+
+ @retval EFI_SUCCESS Procedure has completed.
+ @retval EFI_INVALID_PARAMETER Token or Token->Completion is NULL
+ @retval EFI_NOT_FOUND Token is not currently in use for a non-blocking call
+
+**/
+EFI_STATUS
+EFIAPI
+SmmMpWaitForProcedure (
+ IN CONST EFI_MM_MP_PROTOCOL *This,
+ IN MM_COMPLETION Token
+ )
+{
+ EFI_STATUS Status;
+
+ do {
+ Status = SmmMpCheckForProcedure (This, Token);
+ } while (Status == EFI_NOT_READY);
+
+ return Status;
+}
+
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmMp.h b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmMp.h
new file mode 100644
index 00000000..98146cfc
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmMp.h
@@ -0,0 +1,285 @@
+/** @file
+Include file for SMM MP protocol implementation.
+
+Copyright (c) 2019, Intel Corporation. All rights reserved.<BR>
+
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef _SMM_MP_PROTOCOL_H_
+#define _SMM_MP_PROTOCOL_H_
+
+//
+// SMM MP Protocol function prototypes.
+//
+
+/**
+ Service to retrieve the number of logical processors in the platform.
+
+ @param[in] This The EFI_MM_MP_PROTOCOL instance.
+ @param[out] NumberOfProcessors Pointer to the total number of logical processors in the system,
+ including the BSP and all APs.
+
+ @retval EFI_SUCCESS The number of processors was retrieved successfully
+ @retval EFI_INVALID_PARAMETER NumberOfProcessors is NULL
+**/
+EFI_STATUS
+EFIAPI
+SmmMpGetNumberOfProcessors (
+ IN CONST EFI_MM_MP_PROTOCOL *This,
+ OUT UINTN *NumberOfProcessors
+ );
+
+
+/**
+ This service allows the caller to invoke a procedure on one of the application processors (APs). This
+ function uses an optional token parameter to support blocking and non-blocking modes. If the token
+ is passed into the call, the function will operate in a non-blocking fashion and the caller can
+ check for completion with CheckOnProcedure or WaitForProcedure.
+
+ @param[in] This The EFI_MM_MP_PROTOCOL instance.
+ @param[in] Procedure A pointer to the procedure to be run on the designated target
+ AP of the system. Type EFI_AP_PROCEDURE2 is defined below in
+ related definitions.
+ @param[in] CpuNumber The zero-based index of the processor number of the target
+ AP, on which the code stream is supposed to run. If the number
+ points to the calling processor then it will not run the
+ supplied code.
+ @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for this AP to
+ finish execution of Procedure, either for blocking or
+ non-blocking mode. Zero means infinity. If the timeout
+ expires before this AP returns from Procedure, then Procedure
+ on the AP is terminated. If the timeout expires in blocking
+ mode, the call returns EFI_TIMEOUT. If the timeout expires
+ in non-blocking mode, the timeout can be determined through
+ CheckOnProcedure or WaitForProcedure.
+ Note that timeout support is optional. Whether an
+ implementation supports this feature, can be determined via
+ the Attributes data member.
+ @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code
+ that is run by the AP. It is an optional common mailbox
+ between APs and the caller to share information.
+ @param[in,out] Token This parameter is broken into two components:
+ 1.Token->Completion is an optional parameter that allows the
+ caller to execute the procedure in a blocking or non-blocking
+ fashion. If it is NULL the call is blocking, and the call will
+ not return until the AP has completed the procedure. If the
+ token is not NULL, the call will return immediately. The caller
+ can check whether the procedure has completed with
+ CheckOnProcedure or WaitForProcedure.
+ 2.Token->Status The implementation updates the address pointed
+ at by this variable with the status code returned by Procedure
+ when it completes execution on the target AP, or with EFI_TIMEOUT
+ if the Procedure fails to complete within the optional timeout.
+ The implementation will update this variable with EFI_NOT_READY
+ prior to starting Procedure on the target AP
+ @param[in,out] CPUStatus This optional pointer may be used to get the status code returned
+ by Procedure when it completes execution on the target AP, or with
+ EFI_TIMEOUT if the Procedure fails to complete within the optional
+ timeout. The implementation will update this variable with
+ EFI_NOT_READY prior to starting Procedure on the target AP.
+
+ @retval EFI_SUCCESS In the blocking case, this indicates that Procedure has completed
+ execution on the target AP.
+ In the non-blocking case this indicates that the procedure has
+ been successfully scheduled for execution on the target AP.
+ @retval EFI_INVALID_PARAMETER The input arguments are out of range. Either the target AP is the
+ caller of the function, or the Procedure or Token is NULL
+ @retval EFI_NOT_READY If the target AP is busy executing another procedure
+ @retval EFI_ALREADY_STARTED Token is already in use for another procedure
+ @retval EFI_TIMEOUT In blocking mode, the timeout expired before the specified AP
+ has finished
+ @retval EFI_OUT_OF_RESOURCES Could not allocate a required resource.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmMpDispatchProcedure (
+ IN CONST EFI_MM_MP_PROTOCOL *This,
+ IN EFI_AP_PROCEDURE2 Procedure,
+ IN UINTN CpuNumber,
+ IN UINTN TimeoutInMicroseconds,
+ IN OUT VOID *ProcedureArguments OPTIONAL,
+ IN OUT MM_COMPLETION *Token,
+ IN OUT EFI_STATUS *CPUStatus
+ );
+
+/**
+ This service allows the caller to invoke a procedure on all running application processors (APs)
+ except the caller. This function uses an optional token parameter to support blocking and
+ nonblocking modes. If the token is passed into the call, the function will operate in a non-blocking
+ fashion and the caller can check for completion with CheckOnProcedure or WaitForProcedure.
+
+ It is not necessary for the implementation to run the procedure on every processor on the platform.
+ Processors that are powered down in such a way that they cannot respond to interrupts may be
+ excluded from the broadcast.
+
+
+ @param[in] This The EFI_MM_MP_PROTOCOL instance.
+ @param[in] Procedure A pointer to the code stream to be run on the APs that have
+ entered MM. Type EFI_AP_PROCEDURE is defined below in related
+ definitions.
+ @param[in] TimeoutInMicroseconds Indicates the time limit in microseconds for the APs to finish
+ execution of Procedure, either for blocking or non-blocking mode.
+ Zero means infinity. If the timeout expires before all APs return
+ from Procedure, then Procedure on the failed APs is terminated. If
+ the timeout expires in blocking mode, the call returns EFI_TIMEOUT.
+ If the timeout expires in non-blocking mode, the timeout can be
+ determined through CheckOnProcedure or WaitForProcedure.
+ Note that timeout support is optional. Whether an implementation
+ supports this feature can be determined via the Attributes data
+ member.
+ @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code
+ that is run by the AP. It is an optional common mailbox
+ between APs and the caller to share information.
+ @param[in,out] Token This parameter is broken into two components:
+ 1.Token->Completion is an optional parameter that allows the
+ caller to execute the procedure in a blocking or non-blocking
+ fashion. If it is NULL the call is blocking, and the call will
+ not return until the AP has completed the procedure. If the
+ token is not NULL, the call will return immediately. The caller
+ can check whether the procedure has completed with
+ CheckOnProcedure or WaitForProcedure.
+ 2.Token->Status The implementation updates the address pointed
+ at by this variable with the status code returned by Procedure
+ when it completes execution on the target AP, or with EFI_TIMEOUT
+ if the Procedure fails to complete within the optional timeout.
+ The implementation will update this variable with EFI_NOT_READY
+ prior to starting Procedure on the target AP
+ @param[in,out] CPUStatus This optional pointer may be used to get the individual status
+ returned by every AP that participated in the broadcast. This
+ parameter, if used, provides the base address of an array to hold
+ the EFI_STATUS value of each AP in the system. The size of the
+ array can be ascertained by the GetNumberOfProcessors function.
+ As mentioned above, the broadcast may not include every processor
+ in the system. Some implementations may exclude processors that
+ have been powered down in such a way that they are not responsive
+ to interrupts. Additionally the broadcast excludes the processor
+ which is making the BroadcastProcedure call. For every excluded
+ processor, the array entry must contain a value of EFI_NOT_STARTED
+
+ @retval EFI_SUCCESS In the blocking case, this indicates that Procedure has completed
+ execution on the APs.
+ In the non-blocking case this indicates that the procedure has
+ been successfully scheduled for execution on the APs.
+ @retval EFI_INVALID_PARAMETER The Procedure or Token is NULL
+ @retval EFI_NOT_READY If the target AP is busy executing another procedure
+ @retval EFI_ALREADY_STARTED Token is already in use for another procedure
+ @retval EFI_TIMEOUT In blocking mode, the timeout expired before the specified AP
+ has finished
+ @retval EFI_OUT_OF_RESOURCES Could not allocate a required resource.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmMpBroadcastProcedure (
+ IN CONST EFI_MM_MP_PROTOCOL *This,
+ IN EFI_AP_PROCEDURE2 Procedure,
+ IN UINTN TimeoutInMicroseconds,
+ IN OUT VOID *ProcedureArguments OPTIONAL,
+ IN OUT MM_COMPLETION *Token,
+ IN OUT EFI_STATUS *CPUStatus
+ );
+
+
+/**
+ This service allows the caller to set a startup procedure that will be executed when an AP powers
+ up from a state where core configuration and context is lost. The procedure's execution has the
+ following properties:
+ 1. The procedure executes before the processor is handed over to the operating system.
+ 2. All processors execute the same startup procedure.
+ 3. The procedure may run in parallel with other procedures invoked through the functions in this
+ protocol, or with processors that are executing an MM handler or running in the operating system.
+
+
+ @param[in] This The EFI_MM_MP_PROTOCOL instance.
+ @param[in] Procedure A pointer to the code stream to be run on the designated target AP
+ of the system. Type EFI_AP_PROCEDURE is defined below in Volume 2
+ with the related definitions of
+ EFI_MP_SERVICES_PROTOCOL.StartupAllAPs.
+ The caller may pass a value of NULL to deregister any existing
+ startup procedure.
+ @param[in,out] ProcedureArguments Allows the caller to pass a list of parameters to the code that is
+ run by the AP. It is an optional common mailbox between APs and
+ the caller to share information
+
+ @retval EFI_SUCCESS The Procedure has been set successfully.
+ @retval EFI_INVALID_PARAMETER The Procedure is NULL but ProcedureArguments not NULL.
+**/
+EFI_STATUS
+EFIAPI
+SmmMpSetStartupProcedure (
+ IN CONST EFI_MM_MP_PROTOCOL *This,
+ IN EFI_AP_PROCEDURE Procedure,
+ IN OUT VOID *ProcedureArguments OPTIONAL
+ );
+
+/**
+ When non-blocking execution of a procedure on an AP is invoked with DispatchProcedure,
+ via the use of a token, this function can be used to check for completion of the procedure on the AP.
+ The function takes the token that was passed into the DispatchProcedure call. If the procedure
+ is complete, and therefore it is now possible to run another procedure on the same AP, this function
+ returns EFI_SUCCESS. In this case the status returned by the procedure that executed on the AP is
+ returned in the token's Status field. If the procedure has not yet completed, then this function
+ returns EFI_NOT_READY.
+
+ When a non-blocking execution of a procedure is invoked with BroadcastProcedure, via the
+ use of a token, this function can be used to check for completion of the procedure on all the
+ broadcast APs. The function takes the token that was passed into the BroadcastProcedure
+ call. If the procedure is complete on all broadcast APs this function returns EFI_SUCCESS. In this
+ case the Status field in the token passed into the function reflects the overall result of the
+ invocation, which may be EFI_SUCCESS, if all executions succeeded, or the first observed failure.
+ If the procedure has not yet completed on the broadcast APs, the function returns
+ EFI_NOT_READY.
+
+ @param[in] This The EFI_MM_MP_PROTOCOL instance.
+ @param[in] Token This parameter describes the token that was passed into
+ DispatchProcedure or BroadcastProcedure.
+
+ @retval EFI_SUCCESS Procedure has completed.
+ @retval EFI_NOT_READY The Procedure has not completed.
+ @retval EFI_INVALID_PARAMETER Token or Token->Completion is NULL
+ @retval EFI_NOT_FOUND Token is not currently in use for a non-blocking call
+
+**/
+EFI_STATUS
+EFIAPI
+SmmMpCheckForProcedure (
+ IN CONST EFI_MM_MP_PROTOCOL *This,
+ IN MM_COMPLETION Token
+ );
+
+/**
+ When a non-blocking execution of a procedure on an AP is invoked via DispatchProcedure,
+ this function will block the caller until the remote procedure has completed on the designated AP.
+ The non-blocking procedure invocation is identified by the Token parameter, which must match the
+ token that was used when DispatchProcedure was called. Upon completion the status returned by
+ the procedure that executed on the AP is used to update the token's Status field.
+
+ When a non-blocking execution of a procedure on an AP is invoked via BroadcastProcedure
+ this function will block the caller until the remote procedure has completed on all of the APs that
+ entered MM. The non-blocking procedure invocation is identified by the Token parameter, which
+ must match the token that was used when BroadcastProcedure was called. Upon completion the
+ overall status returned by the procedures that executed on the broadcast APs is used to update the
+ token's Status field. The overall status may be EFI_SUCCESS, if all executions succeeded, or the
+ first observed failure.
+
+
+ @param[in] This The EFI_MM_MP_PROTOCOL instance.
+ @param[in] Token This parameter describes the token that was passed into
+ DispatchProcedure or BroadcastProcedure.
+
+ @retval EFI_SUCCESS Procedure has completed.
+ @retval EFI_INVALID_PARAMETER Token or Token->Completion is NULL
+ @retval EFI_NOT_FOUND Token is not currently in use for a non-blocking call
+
+**/
+EFI_STATUS
+EFIAPI
+SmmMpWaitForProcedure (
+ IN CONST EFI_MM_MP_PROTOCOL *This,
+ IN MM_COMPLETION Token
+ );
+
+#endif
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
new file mode 100644
index 00000000..17c7e552
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
@@ -0,0 +1,1517 @@
+/** @file
+Enable SMM profile.
+
+Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2017 - 2020, AMD Incorporated. All rights reserved.<BR>
+
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+#include "SmmProfileInternal.h"
+
+UINT32 mSmmProfileCr3;
+
+SMM_PROFILE_HEADER *mSmmProfileBase;
+MSR_DS_AREA_STRUCT *mMsrDsAreaBase;
+//
+// The buffer to store SMM profile data.
+//
+UINTN mSmmProfileSize;
+
+//
+// The buffer to enable branch trace store.
+//
+UINTN mMsrDsAreaSize = SMM_PROFILE_DTS_SIZE;
+
+//
+// The flag indicates if execute-disable is supported by processor.
+//
+BOOLEAN mXdSupported = TRUE;
+
+//
+// The flag indicates if execute-disable is enabled on processor.
+//
+BOOLEAN mXdEnabled = FALSE;
+
+//
+// The flag indicates if BTS is supported by processor.
+//
+BOOLEAN mBtsSupported = TRUE;
+
+//
+// The flag indicates if SMM profile starts to record data.
+//
+BOOLEAN mSmmProfileStart = FALSE;
+
+//
+// The flag indicates if #DB will be setup in #PF handler.
+//
+BOOLEAN mSetupDebugTrap = FALSE;
+
+//
+// Record the page fault exception count for one instruction execution.
+//
+UINTN *mPFEntryCount;
+
+UINT64 (*mLastPFEntryValue)[MAX_PF_ENTRY_COUNT];
+UINT64 *(*mLastPFEntryPointer)[MAX_PF_ENTRY_COUNT];
+
+MSR_DS_AREA_STRUCT **mMsrDsArea;
+BRANCH_TRACE_RECORD **mMsrBTSRecord;
+UINTN mBTSRecordNumber;
+PEBS_RECORD **mMsrPEBSRecord;
+
+//
+// These memory ranges are always present. They do not generate access-type page fault
+// exceptions, but they may generate instruction-fetch-type page fault exceptions.
+//
+MEMORY_PROTECTION_RANGE *mProtectionMemRange = NULL;
+UINTN mProtectionMemRangeCount = 0;
+
+//
+// Some predefined memory ranges.
+//
+MEMORY_PROTECTION_RANGE mProtectionMemRangeTemplate[] = {
+ //
+ // SMRAM range (to be fixed in runtime).
+ // It is always present and instruction fetches are allowed.
+ //
+ {{0x00000000, 0x00000000},TRUE,FALSE},
+
+ //
+ // SMM profile data range (to be fixed in runtime).
+ // It is always present and instruction fetches are not allowed.
+ //
+ {{0x00000000, 0x00000000},TRUE,TRUE},
+
+ //
+ // SMRAM ranges not covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSize (to be fixed in runtime).
+ // It is always present and instruction fetches are allowed.
+ // {{0x00000000, 0x00000000},TRUE,FALSE},
+ //
+
+ //
+ // Future extended range could be added here.
+ //
+
+ //
+ // PCI MMIO ranges (to be added in runtime).
+ // They are always present and instruction fetches are not allowed.
+ //
+};
+
+//
+// These memory ranges are mapped by 4KB-page instead of 2MB-page.
+//
+MEMORY_RANGE *mSplitMemRange = NULL;
+UINTN mSplitMemRangeCount = 0;
+
+//
+// SMI command port.
+//
+UINT32 mSmiCommandPort;
+
+/**
+ Disable branch trace store.
+
+**/
+VOID
+DisableBTS (
+ VOID
+ )
+{
+ AsmMsrAnd64 (MSR_DEBUG_CTL, ~((UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR)));
+}
+
+/**
+ Enable branch trace store.
+
+**/
+VOID
+EnableBTS (
+ VOID
+ )
+{
+ AsmMsrOr64 (MSR_DEBUG_CTL, (MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR));
+}
+
+/**
+ Get CPU Index from APIC ID.
+
+**/
+UINTN
+GetCpuIndex (
+ VOID
+ )
+{
+ UINTN Index;
+ UINT32 ApicId;
+
+ ApicId = GetApicId ();
+
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == ApicId) {
+ return Index;
+ }
+ }
+ ASSERT (FALSE);
+ return 0;
+}
+
+/**
+ Get the source IP after the execute-disable exception is triggered.
+
+ @param CpuIndex The index of CPU.
+ @param DestinationIP The destination address.
+
+ @return Source IP of the branch that targeted DestinationIP, or 0 if no match is found.
+
+**/
+UINT64
+GetSourceFromDestinationOnBts (
+ UINTN CpuIndex,
+ UINT64 DestinationIP
+ )
+{
+ BRANCH_TRACE_RECORD *CurrentBTSRecord;
+ UINTN Index;
+ BOOLEAN FirstMatch;
+
+ FirstMatch = FALSE;
+
+ CurrentBTSRecord = (BRANCH_TRACE_RECORD *)mMsrDsArea[CpuIndex]->BTSIndex;
+ for (Index = 0; Index < mBTSRecordNumber; Index++) {
+ if ((UINTN)CurrentBTSRecord < (UINTN)mMsrBTSRecord[CpuIndex]) {
+ //
+ // Underflow
+ //
+ CurrentBTSRecord = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[CpuIndex]->BTSAbsoluteMaximum - 1);
+ CurrentBTSRecord --;
+ }
+ if (CurrentBTSRecord->LastBranchTo == DestinationIP) {
+ //
+ // A match: the first hit corresponds to the #DB exception itself, the second is the real source.
+ //
+ if (!FirstMatch) {
+ //
+ // The first match is the DEBUG exception.
+ //
+ FirstMatch = TRUE;
+ } else {
+ //
+ // Second match found: this is the proper record.
+ //
+ return CurrentBTSRecord->LastBranchFrom;
+ }
+ }
+ CurrentBTSRecord--;
+ }
+
+ return 0;
+}
+
+/**
+ SMM profile specific INT 1 (single-step) exception handler.
+
+ @param InterruptType Defines the type of interrupt or exception that
+ occurred on the processor.This parameter is processor architecture specific.
+ @param SystemContext A pointer to the processor context when
+ the interrupt occurred on the processor.
+**/
+VOID
+EFIAPI
+DebugExceptionHandler (
+ IN EFI_EXCEPTION_TYPE InterruptType,
+ IN EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ UINTN CpuIndex;
+ UINTN PFEntry;
+
+ if (!mSmmProfileStart &&
+ !HEAP_GUARD_NONSTOP_MODE &&
+ !NULL_DETECTION_NONSTOP_MODE) {
+ return;
+ }
+ CpuIndex = GetCpuIndex ();
+
+ //
+ // Clear last PF entries
+ //
+ for (PFEntry = 0; PFEntry < mPFEntryCount[CpuIndex]; PFEntry++) {
+ *mLastPFEntryPointer[CpuIndex][PFEntry] = mLastPFEntryValue[CpuIndex][PFEntry];
+ }
+
+ //
+ // Reset page fault exception count for next page fault.
+ //
+ mPFEntryCount[CpuIndex] = 0;
+
+ //
+ // Flush TLB
+ //
+ CpuFlushTlb ();
+
+ //
+ // Clear TF in EFLAGS
+ //
+ ClearTrapFlag (SystemContext);
+}
+
+/**
+ Check if the input address is in SMM ranges.
+
+ @param[in] Address The input address.
+
+ @retval TRUE The input address is in SMM.
+ @retval FALSE The input address is not in SMM.
+**/
+BOOLEAN
+IsInSmmRanges (
+ IN EFI_PHYSICAL_ADDRESS Address
+ )
+{
+ UINTN Index;
+
+ if ((Address >= mCpuHotPlugData.SmrrBase) && (Address < mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
+ return TRUE;
+ }
+ for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
+ if (Address >= mSmmCpuSmramRanges[Index].CpuStart &&
+ Address < mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize) {
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+/**
+ Check if the memory address is present, and whether it is execute-disable.
+
+ @param Address The address of Memory.
+ @param Nx The flag indicates if the memory is execute-disable.
+
+ @retval TRUE The memory address is present.
+ @retval FALSE The memory address is not present.
+
+**/
+BOOLEAN
+IsAddressValid (
+ IN EFI_PHYSICAL_ADDRESS Address,
+ IN BOOLEAN *Nx
+ )
+{
+ UINTN Index;
+
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ //
+ // Check configuration
+ //
+ for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
+ if ((Address >= mProtectionMemRange[Index].Range.Base) && (Address < mProtectionMemRange[Index].Range.Top)) {
+ *Nx = mProtectionMemRange[Index].Nx;
+ return mProtectionMemRange[Index].Present;
+ }
+ }
+ *Nx = TRUE;
+ return FALSE;
+
+ } else {
+ *Nx = TRUE;
+ if (IsInSmmRanges (Address)) {
+ *Nx = FALSE;
+ }
+ return TRUE;
+ }
+}
+
+/**
+ Check if the memory address will be mapped by 4KB-page.
+
+ @param Address The address of Memory.
+
+ @retval TRUE The 2MB region containing Address will be split into 4KB pages.
+ @retval FALSE The address can stay on a 2MB mapping.
+
+**/
+BOOLEAN
+IsAddressSplit (
+ IN EFI_PHYSICAL_ADDRESS Address
+ )
+{
+ UINTN Index;
+
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ //
+ // Check configuration
+ //
+ for (Index = 0; Index < mSplitMemRangeCount; Index++) {
+ if ((Address >= mSplitMemRange[Index].Base) && (Address < mSplitMemRange[Index].Top)) {
+ return TRUE;
+ }
+ }
+ } else {
+ if (Address < mCpuHotPlugData.SmrrBase) {
+ if ((mCpuHotPlugData.SmrrBase - Address) < BASE_2MB) {
+ return TRUE;
+ }
+ } else if (Address > (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) {
+ if ((Address - (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) < BASE_2MB) {
+ return TRUE;
+ }
+ }
+ }
+ //
+ // Return default
+ //
+ return FALSE;
+}
+
+/**
+ Initialize the protected memory ranges and the 4KB-page mapped memory ranges.
+
+**/
+VOID
+InitProtectedMemRange (
+ VOID
+ )
+{
+ UINTN Index;
+ UINTN NumberOfDescriptors;
+ UINTN NumberOfAddedDescriptors;
+ UINTN NumberOfProtectRange;
+ UINTN NumberOfSpliteRange;
+ EFI_GCD_MEMORY_SPACE_DESCRIPTOR *MemorySpaceMap;
+ UINTN TotalSize;
+ EFI_PHYSICAL_ADDRESS ProtectBaseAddress;
+ EFI_PHYSICAL_ADDRESS ProtectEndAddress;
+ EFI_PHYSICAL_ADDRESS Top2MBAlignedAddress;
+ EFI_PHYSICAL_ADDRESS Base2MBAlignedAddress;
+ UINT64 High4KBPageSize;
+ UINT64 Low4KBPageSize;
+
+ NumberOfDescriptors = 0;
+ NumberOfAddedDescriptors = mSmmCpuSmramRangeCount;
+ NumberOfSpliteRange = 0;
+ MemorySpaceMap = NULL;
+
+ //
+ // Get MMIO ranges from GCD and add them into protected memory ranges.
+ //
+ gDS->GetMemorySpaceMap (
+ &NumberOfDescriptors,
+ &MemorySpaceMap
+ );
+ for (Index = 0; Index < NumberOfDescriptors; Index++) {
+ if (MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeMemoryMappedIo) {
+ NumberOfAddedDescriptors++;
+ }
+ }
+
+ if (NumberOfAddedDescriptors != 0) {
+ TotalSize = NumberOfAddedDescriptors * sizeof (MEMORY_PROTECTION_RANGE) + sizeof (mProtectionMemRangeTemplate);
+ mProtectionMemRange = (MEMORY_PROTECTION_RANGE *) AllocateZeroPool (TotalSize);
+ ASSERT (mProtectionMemRange != NULL);
+ mProtectionMemRangeCount = TotalSize / sizeof (MEMORY_PROTECTION_RANGE);
+
+ //
+ // Copy existing ranges.
+ //
+ CopyMem (mProtectionMemRange, mProtectionMemRangeTemplate, sizeof (mProtectionMemRangeTemplate));
+
+ //
+ // Create split ranges which come from protected ranges.
+ //
+ TotalSize = (TotalSize / sizeof (MEMORY_PROTECTION_RANGE)) * sizeof (MEMORY_RANGE);
+ mSplitMemRange = (MEMORY_RANGE *) AllocateZeroPool (TotalSize);
+ ASSERT (mSplitMemRange != NULL);
+
+ //
+ // Create SMM ranges which are set to present and execution-enable.
+ //
+ NumberOfProtectRange = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);
+ for (Index = 0; Index < mSmmCpuSmramRangeCount; Index++) {
+ if (mSmmCpuSmramRanges[Index].CpuStart >= mProtectionMemRange[0].Range.Base &&
+ mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize < mProtectionMemRange[0].Range.Top) {
+ //
+ // If the address has already been covered by mCpuHotPlugData.SmrrBase/mCpuHotPlugData.SmrrSize
+ //
+ break;
+ }
+ mProtectionMemRange[NumberOfProtectRange].Range.Base = mSmmCpuSmramRanges[Index].CpuStart;
+ mProtectionMemRange[NumberOfProtectRange].Range.Top = mSmmCpuSmramRanges[Index].CpuStart + mSmmCpuSmramRanges[Index].PhysicalSize;
+ mProtectionMemRange[NumberOfProtectRange].Present = TRUE;
+ mProtectionMemRange[NumberOfProtectRange].Nx = FALSE;
+ NumberOfProtectRange++;
+ }
+
+ //
+ // Create MMIO ranges which are set to present and execution-disable.
+ //
+ for (Index = 0; Index < NumberOfDescriptors; Index++) {
+ if (MemorySpaceMap[Index].GcdMemoryType != EfiGcdMemoryTypeMemoryMappedIo) {
+ continue;
+ }
+ mProtectionMemRange[NumberOfProtectRange].Range.Base = MemorySpaceMap[Index].BaseAddress;
+ mProtectionMemRange[NumberOfProtectRange].Range.Top = MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length;
+ mProtectionMemRange[NumberOfProtectRange].Present = TRUE;
+ mProtectionMemRange[NumberOfProtectRange].Nx = TRUE;
+ NumberOfProtectRange++;
+ }
+
+ //
+ // Check and update the actual count of protected memory ranges.
+ //
+ ASSERT (NumberOfProtectRange <= mProtectionMemRangeCount);
+ mProtectionMemRangeCount = NumberOfProtectRange;
+ }
+
+ //
+ // According to the protected ranges, create the ranges which will be mapped by 4KB pages.
+ //
+ NumberOfSpliteRange = 0;
+ NumberOfProtectRange = mProtectionMemRangeCount;
+ for (Index = 0; Index < NumberOfProtectRange; Index++) {
+ //
+ // If the MMIO base address is not 2MB-aligned, expand it to 2MB alignment so 4KB pages can be created in the page table.
+ //
+ ProtectBaseAddress = mProtectionMemRange[Index].Range.Base;
+ ProtectEndAddress = mProtectionMemRange[Index].Range.Top;
+ if (((ProtectBaseAddress & (SIZE_2MB - 1)) != 0) || ((ProtectEndAddress & (SIZE_2MB - 1)) != 0)) {
+ //
+ // Check whether the non-2MB-aligned edges can be mapped by 4KB pages while the
+ // 2MB-aligned middle keeps 2MB pages. Mixing 4KB and 2MB pages saves SMRAM space.
+ //
+ Top2MBAlignedAddress = ProtectEndAddress & ~(SIZE_2MB - 1);
+ Base2MBAlignedAddress = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
+ if ((Top2MBAlignedAddress > Base2MBAlignedAddress) &&
+ ((Top2MBAlignedAddress - Base2MBAlignedAddress) >= SIZE_2MB)) {
+ //
+ // There is a range which could be mapped by 2MB pages.
+ //
+ High4KBPageSize = ((ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectEndAddress & ~(SIZE_2MB - 1));
+ Low4KBPageSize = ((ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectBaseAddress & ~(SIZE_2MB - 1));
+ if (High4KBPageSize != 0) {
+ //
+ // Add the non-2MB-aligned range to be mapped by 4KB pages.
+ //
+ mSplitMemRange[NumberOfSpliteRange].Base = ProtectEndAddress & ~(SIZE_2MB - 1);
+ mSplitMemRange[NumberOfSpliteRange].Top = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
+ NumberOfSpliteRange++;
+ }
+ if (Low4KBPageSize != 0) {
+ //
+ // Add the non-2MB-aligned range to be mapped by 4KB pages.
+ //
+ mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
+ mSplitMemRange[NumberOfSpliteRange].Top = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
+ NumberOfSpliteRange++;
+ }
+ } else {
+ //
+ // The range can only be mapped by 4KB pages.
+ //
+ mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1);
+ mSplitMemRange[NumberOfSpliteRange].Top = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1);
+ NumberOfSpliteRange++;
+ }
+ }
+ }
+
+ mSplitMemRangeCount = NumberOfSpliteRange;
+
+ DEBUG ((EFI_D_INFO, "SMM Profile Memory Ranges:\n"));
+ for (Index = 0; Index < mProtectionMemRangeCount; Index++) {
+ DEBUG ((EFI_D_INFO, "mProtectionMemRange[%d].Base = %lx\n", Index, mProtectionMemRange[Index].Range.Base));
+ DEBUG ((EFI_D_INFO, "mProtectionMemRange[%d].Top = %lx\n", Index, mProtectionMemRange[Index].Range.Top));
+ }
+ for (Index = 0; Index < mSplitMemRangeCount; Index++) {
+ DEBUG ((EFI_D_INFO, "mSplitMemRange[%d].Base = %lx\n", Index, mSplitMemRange[Index].Base));
+ DEBUG ((EFI_D_INFO, "mSplitMemRange[%d].Top = %lx\n", Index, mSplitMemRange[Index].Top));
+ }
+}
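+
+//
+// Worked example of the alignment math above (addresses are illustrative):
+// for a single 4KB MMIO page at 0xFEC08000, no whole 2MB page fits inside the
+// range, so the surrounding 2MB window is split into 4KB pages:
+//
+// ProtectBaseAddress = 0xFEC08000, ProtectEndAddress = 0xFEC09000
+// Base = 0xFEC08000 & ~(SIZE_2MB - 1) = 0xFEC00000
+// Top = (0xFEC09000 + SIZE_2MB - 1) & ~(SIZE_2MB - 1) = 0xFEE00000
+//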
+
+/**
+ Update page table according to protected memory ranges and the 4KB-page mapped memory ranges.
+
+**/
+VOID
+InitPaging (
+ VOID
+ )
+{
+ UINT64 Pml5Entry;
+ UINT64 Pml4Entry;
+ UINT64 *Pml5;
+ UINT64 *Pml4;
+ UINT64 *Pdpt;
+ UINT64 *Pd;
+ UINT64 *Pt;
+ UINTN Address;
+ UINTN Pml5Index;
+ UINTN Pml4Index;
+ UINTN PdptIndex;
+ UINTN PdIndex;
+ UINTN PtIndex;
+ UINTN NumberOfPdptEntries;
+ UINTN NumberOfPml4Entries;
+ UINTN NumberOfPml5Entries;
+ UINTN SizeOfMemorySpace;
+ BOOLEAN Nx;
+ IA32_CR4 Cr4;
+ BOOLEAN Enable5LevelPaging;
+
+ Cr4.UintN = AsmReadCr4 ();
+ Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
+
+ if (sizeof (UINTN) == sizeof (UINT64)) {
+ if (!Enable5LevelPaging) {
+ Pml5Entry = (UINTN) mSmmProfileCr3 | IA32_PG_P;
+ Pml5 = &Pml5Entry;
+ } else {
+ Pml5 = (UINT64*) (UINTN) mSmmProfileCr3;
+ }
+ SizeOfMemorySpace = HighBitSet64 (gPhyMask) + 1;
+ //
+ // Calculate the number of PML5, PML4 and PDPT entries.
+ //
+ NumberOfPml5Entries = 1;
+ if (SizeOfMemorySpace > 48) {
+ NumberOfPml5Entries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 48);
+ SizeOfMemorySpace = 48;
+ }
+
+ NumberOfPml4Entries = 1;
+ if (SizeOfMemorySpace > 39) {
+ NumberOfPml4Entries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 39);
+ SizeOfMemorySpace = 39;
+ }
+
+ NumberOfPdptEntries = 1;
+ ASSERT (SizeOfMemorySpace > 30);
+ NumberOfPdptEntries = (UINTN) LShiftU64 (1, SizeOfMemorySpace - 30);
+ } else {
+ Pml4Entry = (UINTN) mSmmProfileCr3 | IA32_PG_P;
+ Pml4 = &Pml4Entry;
+ Pml5Entry = (UINTN) Pml4 | IA32_PG_P;
+ Pml5 = &Pml5Entry;
+ NumberOfPml5Entries = 1;
+ NumberOfPml4Entries = 1;
+ NumberOfPdptEntries = 4;
+ }
+
+ //
+ // Go through page table and change 2MB-page into 4KB-page.
+ //
+ for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) {
+ if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
+ //
+ // If PML5 entry does not exist, skip it
+ //
+ continue;
+ }
+ Pml4 = (UINT64 *) (UINTN) (Pml5[Pml5Index] & PHYSICAL_ADDRESS_MASK);
+ for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
+ if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
+ //
+ // If PML4 entry does not exist, skip it
+ //
+ continue;
+ }
+ Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) {
+ if ((*Pdpt & IA32_PG_P) == 0) {
+ //
+ // If PDPT entry does not exist, skip it
+ //
+ continue;
+ }
+ if ((*Pdpt & IA32_PG_PS) != 0) {
+ //
+ // This is a 1G entry, skip it
+ //
+ continue;
+ }
+ Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ if (Pd == 0) {
+ continue;
+ }
+ for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
+ if ((*Pd & IA32_PG_P) == 0) {
+ //
+ // If PD entry does not exist, skip it
+ //
+ continue;
+ }
+ Address = (UINTN) LShiftU64 (
+ LShiftU64 (
+ LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex,
+ 9
+ ) + PdIndex,
+ 21
+ );
+
+ //
+ // If it is 2M page, check IsAddressSplit()
+ //
+ if (((*Pd & IA32_PG_PS) != 0) && IsAddressSplit (Address)) {
+ //
+ // Based on current page table, create 4KB page table for split area.
+ //
+ ASSERT (Address == (*Pd & PHYSICAL_ADDRESS_MASK));
+
+ Pt = AllocatePageTableMemory (1);
+ ASSERT (Pt != NULL);
+
+ // Split it
+ for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++) {
+ Pt[PtIndex] = Address + ((PtIndex << 12) | mAddressEncMask | PAGE_ATTRIBUTE_BITS);
+ } // end for PT
+ *Pd = (UINT64)(UINTN)Pt | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ } // end if IsAddressSplit
+ } // end for PD
+ } // end for PDPT
+ } // end for PML4
+ } // end for PML5
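+
+// Illustrative note (not part of the source): the Address computation above
+// rebuilds the 2MB-aligned linear address from the table-walk indices, i.e.
+//   Address = (((Pml5Index*512 + Pml4Index)*512 + PdptIndex)*512 + PdIndex) * SIZE_2MB
+// For example Pml5Index = 0, Pml4Index = 0, PdptIndex = 3, PdIndex = 5 gives
+//   ((3 << 9) + 5) << 21 = 0xC0A00000, the base of that 2MB region.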
+
+ //
+ // Go through the page table and set several page table entries to absent or execute-disable.
+ //
+ DEBUG ((EFI_D_INFO, "Patch page table start ...\n"));
+ for (Pml5Index = 0; Pml5Index < NumberOfPml5Entries; Pml5Index++) {
+ if ((Pml5[Pml5Index] & IA32_PG_P) == 0) {
+ //
+ // If PML5 entry does not exist, skip it
+ //
+ continue;
+ }
+ Pml4 = (UINT64 *) (UINTN) (Pml5[Pml5Index] & PHYSICAL_ADDRESS_MASK);
+ for (Pml4Index = 0; Pml4Index < NumberOfPml4Entries; Pml4Index++) {
+ if ((Pml4[Pml4Index] & IA32_PG_P) == 0) {
+ //
+ // If PML4 entry does not exist, skip it
+ //
+ continue;
+ }
+ Pdpt = (UINT64 *)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ for (PdptIndex = 0; PdptIndex < NumberOfPdptEntries; PdptIndex++, Pdpt++) {
+ if ((*Pdpt & IA32_PG_P) == 0) {
+ //
+ // If PDPT entry does not exist, skip it
+ //
+ continue;
+ }
+ if ((*Pdpt & IA32_PG_PS) != 0) {
+ //
+ // This is a 1G entry; set the NX bit and skip it
+ //
+ if (mXdSupported) {
+ *Pdpt = *Pdpt | IA32_PG_NX;
+ }
+ continue;
+ }
+ Pd = (UINT64 *)(UINTN)(*Pdpt & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ if (Pd == 0) {
+ continue;
+ }
+ for (PdIndex = 0; PdIndex < SIZE_4KB / sizeof (*Pd); PdIndex++, Pd++) {
+ if ((*Pd & IA32_PG_P) == 0) {
+ //
+ // If PD entry does not exist, skip it
+ //
+ continue;
+ }
+ Address = (UINTN) LShiftU64 (
+ LShiftU64 (
+ LShiftU64 ((Pml5Index << 9) + Pml4Index, 9) + PdptIndex,
+ 9
+ ) + PdIndex,
+ 21
+ );
+
+ if ((*Pd & IA32_PG_PS) != 0) {
+ // 2MB page
+
+ if (!IsAddressValid (Address, &Nx)) {
+ //
+ // Patch to remove Present flag and RW flag
+ //
+ *Pd = *Pd & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
+ }
+ if (Nx && mXdSupported) {
+ *Pd = *Pd | IA32_PG_NX;
+ }
+ } else {
+ // 4KB page
+ Pt = (UINT64 *)(UINTN)(*Pd & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ if (Pt == 0) {
+ continue;
+ }
+ for (PtIndex = 0; PtIndex < SIZE_4KB / sizeof(*Pt); PtIndex++, Pt++) {
+ if (!IsAddressValid (Address, &Nx)) {
+ *Pt = *Pt & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
+ }
+ if (Nx && mXdSupported) {
+ *Pt = *Pt | IA32_PG_NX;
+ }
+ Address += SIZE_4KB;
+ } // end for PT
+ } // end if PS
+ } // end for PD
+ } // end for PDPT
+ } // end for PML4
+ } // end for PML5
+
+ //
+ // Flush TLB
+ //
+ CpuFlushTlb ();
+ DEBUG ((EFI_D_INFO, "Patch page table done!\n"));
+ //
+ // Set execute-disable flag
+ //
+ mXdEnabled = TRUE;
+
+ return ;
+}
+
+/**
+ Get the system port address of the SMI Command Port from the FADT table.
+
+**/
+VOID
+GetSmiCommandPort (
+ VOID
+ )
+{
+ EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *Fadt;
+
+ Fadt = (EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *) EfiLocateFirstAcpiTable (
+ EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE
+ );
+ ASSERT (Fadt != NULL);
+
+ mSmiCommandPort = Fadt->SmiCmd;
+ DEBUG ((EFI_D_INFO, "mSmiCommandPort = %x\n", mSmiCommandPort));
+}
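+
+// Context (illustrative sketch, not part of the source; SmiCommandValue is a
+// hypothetical value): a software SMI is raised by writing a byte to the port
+// reported in FADT->SmiCmd, e.g.
+//   IoWrite8 (mSmiCommandPort, SmiCommandValue);
+// SmmProfilePFHandler() later matches trapped I/O state against
+// mSmiCommandPort to recognize such software SMIs.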
+
+/**
+ Updates the page table to make some memory ranges (like system memory) absent
+ and make some memory ranges (like MMIO) present and execute-disable. It also
+ updates 2MB pages to 4KB pages for some memory ranges.
+
+**/
+VOID
+SmmProfileStart (
+ VOID
+ )
+{
+ //
+ // This flag indicates that SMM profile starts to work.
+ //
+ mSmmProfileStart = TRUE;
+}
+
+/**
+ Initialize SMM profile in SmmReadyToLock protocol callback function.
+
+ @param Protocol Points to the protocol's unique identifier.
+ @param Interface Points to the interface instance.
+ @param Handle The handle on which the interface was installed.
+
+ @retval EFI_SUCCESS SmmReadyToLock protocol callback runs successfully.
+**/
+EFI_STATUS
+EFIAPI
+InitSmmProfileCallBack (
+ IN CONST EFI_GUID *Protocol,
+ IN VOID *Interface,
+ IN EFI_HANDLE Handle
+ )
+{
+ //
+ // Save the base address to a variable so that the SMM profile data can be found.
+ //
+ gRT->SetVariable (
+ SMM_PROFILE_NAME,
+ &gEfiCallerIdGuid,
+ EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS,
+ sizeof(mSmmProfileBase),
+ &mSmmProfileBase
+ );
+
+ //
+ // Get Software SMI from FADT
+ //
+ GetSmiCommandPort ();
+
+ //
+ // Initialize protected memory range for patching page table later.
+ //
+ InitProtectedMemRange ();
+
+ return EFI_SUCCESS;
+}
+
+/**
+ Initialize SMM profile data structures.
+
+**/
+VOID
+InitSmmProfileInternal (
+ VOID
+ )
+{
+ EFI_STATUS Status;
+ EFI_PHYSICAL_ADDRESS Base;
+ VOID *Registration;
+ UINTN Index;
+ UINTN MsrDsAreaSizePerCpu;
+ UINTN TotalSize;
+
+ mPFEntryCount = (UINTN *)AllocateZeroPool (sizeof (UINTN) * mMaxNumberOfCpus);
+ ASSERT (mPFEntryCount != NULL);
+ mLastPFEntryValue = (UINT64 (*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
+ sizeof (mLastPFEntryValue[0]) * mMaxNumberOfCpus);
+ ASSERT (mLastPFEntryValue != NULL);
+ mLastPFEntryPointer = (UINT64 *(*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool (
+ sizeof (mLastPFEntryPointer[0]) * mMaxNumberOfCpus);
+ ASSERT (mLastPFEntryPointer != NULL);
+
+ //
+ // Allocate memory for SmmProfile below 4GB.
+ // The base address is limited to below 4GB by AllocateMaxAddress.
+ //
+ mSmmProfileSize = PcdGet32 (PcdCpuSmmProfileSize);
+ ASSERT ((mSmmProfileSize & 0xFFF) == 0);
+
+ if (mBtsSupported) {
+ TotalSize = mSmmProfileSize + mMsrDsAreaSize;
+ } else {
+ TotalSize = mSmmProfileSize;
+ }
+
+ Base = 0xFFFFFFFF;
+ Status = gBS->AllocatePages (
+ AllocateMaxAddress,
+ EfiReservedMemoryType,
+ EFI_SIZE_TO_PAGES (TotalSize),
+ &Base
+ );
+ ASSERT_EFI_ERROR (Status);
+ ZeroMem ((VOID *)(UINTN)Base, TotalSize);
+ mSmmProfileBase = (SMM_PROFILE_HEADER *)(UINTN)Base;
+
+ //
+ // Initialize SMM profile data header.
+ //
+ mSmmProfileBase->HeaderSize = sizeof (SMM_PROFILE_HEADER);
+ mSmmProfileBase->MaxDataEntries = (UINT64)((mSmmProfileSize - sizeof(SMM_PROFILE_HEADER)) / sizeof (SMM_PROFILE_ENTRY));
+ mSmmProfileBase->MaxDataSize = MultU64x64 (mSmmProfileBase->MaxDataEntries, sizeof(SMM_PROFILE_ENTRY));
+ mSmmProfileBase->CurDataEntries = 0;
+ mSmmProfileBase->CurDataSize = 0;
+ mSmmProfileBase->TsegStart = mCpuHotPlugData.SmrrBase;
+ mSmmProfileBase->TsegSize = mCpuHotPlugData.SmrrSize;
+ mSmmProfileBase->NumSmis = 0;
+ mSmmProfileBase->NumCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
+
+ if (mBtsSupported) {
+ mMsrDsArea = (MSR_DS_AREA_STRUCT **)AllocateZeroPool (sizeof (MSR_DS_AREA_STRUCT *) * mMaxNumberOfCpus);
+ ASSERT (mMsrDsArea != NULL);
+ mMsrBTSRecord = (BRANCH_TRACE_RECORD **)AllocateZeroPool (sizeof (BRANCH_TRACE_RECORD *) * mMaxNumberOfCpus);
+ ASSERT (mMsrBTSRecord != NULL);
+ mMsrPEBSRecord = (PEBS_RECORD **)AllocateZeroPool (sizeof (PEBS_RECORD *) * mMaxNumberOfCpus);
+ ASSERT (mMsrPEBSRecord != NULL);
+
+ mMsrDsAreaBase = (MSR_DS_AREA_STRUCT *)((UINTN)Base + mSmmProfileSize);
+ MsrDsAreaSizePerCpu = mMsrDsAreaSize / mMaxNumberOfCpus;
+ mBTSRecordNumber = (MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER - sizeof(MSR_DS_AREA_STRUCT)) / sizeof(BRANCH_TRACE_RECORD);
+ for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
+ mMsrDsArea[Index] = (MSR_DS_AREA_STRUCT *)((UINTN)mMsrDsAreaBase + MsrDsAreaSizePerCpu * Index);
+ mMsrBTSRecord[Index] = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[Index] + sizeof(MSR_DS_AREA_STRUCT));
+ mMsrPEBSRecord[Index] = (PEBS_RECORD *)((UINTN)mMsrDsArea[Index] + MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER);
+
+ mMsrDsArea[Index]->BTSBufferBase = (UINTN)mMsrBTSRecord[Index];
+ mMsrDsArea[Index]->BTSIndex = mMsrDsArea[Index]->BTSBufferBase;
+ mMsrDsArea[Index]->BTSAbsoluteMaximum = mMsrDsArea[Index]->BTSBufferBase + mBTSRecordNumber * sizeof(BRANCH_TRACE_RECORD) + 1;
+ mMsrDsArea[Index]->BTSInterruptThreshold = mMsrDsArea[Index]->BTSAbsoluteMaximum + 1;
+
+ mMsrDsArea[Index]->PEBSBufferBase = (UINTN)mMsrPEBSRecord[Index];
+ mMsrDsArea[Index]->PEBSIndex = mMsrDsArea[Index]->PEBSBufferBase;
+ mMsrDsArea[Index]->PEBSAbsoluteMaximum = mMsrDsArea[Index]->PEBSBufferBase + PEBS_RECORD_NUMBER * sizeof(PEBS_RECORD) + 1;
+ mMsrDsArea[Index]->PEBSInterruptThreshold = mMsrDsArea[Index]->PEBSAbsoluteMaximum + 1;
+ }
+ }
+
+ mProtectionMemRange = mProtectionMemRangeTemplate;
+ mProtectionMemRangeCount = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);
+
+ //
+ // Update TSeg entry.
+ //
+ mProtectionMemRange[0].Range.Base = mCpuHotPlugData.SmrrBase;
+ mProtectionMemRange[0].Range.Top = mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize;
+
+ //
+ // Update SMM profile entry.
+ //
+ mProtectionMemRange[1].Range.Base = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase;
+ mProtectionMemRange[1].Range.Top = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase + TotalSize;
+
+ //
+ // Allocate memory reserved for creating 4KB pages.
+ //
+ InitPagesForPFHandler ();
+
+ //
+ // Start SMM profile when SmmReadyToLock protocol is installed.
+ //
+ Status = gSmst->SmmRegisterProtocolNotify (
+ &gEfiSmmReadyToLockProtocolGuid,
+ InitSmmProfileCallBack,
+ &Registration
+ );
+ ASSERT_EFI_ERROR (Status);
+
+ return ;
+}
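+
+// Illustrative sketch (not part of the source) of the per-CPU DS save area
+// laid out above, where each CPU gets a slice of size
+// MsrDsAreaSizePerCpu = mMsrDsAreaSize / mMaxNumberOfCpus:
+//   mMsrDsArea[i]     : MSR_DS_AREA_STRUCT header at the start of the slice
+//   mMsrBTSRecord[i]  : mBTSRecordNumber BRANCH_TRACE_RECORD entries after it
+//   mMsrPEBSRecord[i] : PEBS_RECORD_NUMBER PEBS_RECORD entries at the end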
+
+/**
+ Check if the features are supported by the processor.
+
+**/
+VOID
+CheckFeatureSupported (
+ VOID
+ )
+{
+ UINT32 RegEax;
+ UINT32 RegEcx;
+ UINT32 RegEdx;
+ MSR_IA32_MISC_ENABLE_REGISTER MiscEnableMsr;
+
+ if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
+ AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
+ if (RegEax <= CPUID_EXTENDED_FUNCTION) {
+ mCetSupported = FALSE;
+ PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
+ }
+ AsmCpuidEx (CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS, CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO, NULL, NULL, &RegEcx, NULL);
+ if ((RegEcx & CPUID_CET_SS) == 0) {
+ mCetSupported = FALSE;
+ PatchInstructionX86 (mPatchCetSupported, mCetSupported, 1);
+ }
+ }
+
+ if (mXdSupported) {
+ AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
+ if (RegEax <= CPUID_EXTENDED_FUNCTION) {
+ //
+ // Extended CPUID functions are not supported on this processor.
+ //
+ mXdSupported = FALSE;
+ PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
+ }
+
+ AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
+ if ((RegEdx & CPUID1_EDX_XD_SUPPORT) == 0) {
+ //
+ // Execute Disable Bit feature is not supported on this processor.
+ //
+ mXdSupported = FALSE;
+ PatchInstructionX86 (gPatchXdSupported, mXdSupported, 1);
+ }
+
+ if (StandardSignatureIsAuthenticAMD ()) {
+ //
+ // AMD processors do not support MSR_IA32_MISC_ENABLE
+ //
+ PatchInstructionX86 (gPatchMsrIa32MiscEnableSupported, FALSE, 1);
+ }
+ }
+
+ if (mBtsSupported) {
+ AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx);
+ if ((RegEdx & CPUID1_EDX_BTS_AVAILABLE) != 0) {
+ //
+ // Per IA32 manuals:
+ // When CPUID.1:EDX[21] is set, the following BTS facilities are available:
+ // 1. The BTS_UNAVAILABLE flag in the IA32_MISC_ENABLE MSR indicates the
+ // availability of the BTS facilities, including the ability to set the BTS and
+ // BTINT bits in the MSR_DEBUGCTLA MSR.
+ // 2. The IA32_DS_AREA MSR can be programmed to point to the DS save area.
+ //
+ MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
+ if (MiscEnableMsr.Bits.BTS == 1) {
+ //
+ // BTS facilities are not supported if the MSR_IA32_MISC_ENABLE.BTS bit is set.
+ //
+ mBtsSupported = FALSE;
+ }
+ }
+ }
+}
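+
+// Informal summary of the feature checks above (bit positions per the Intel SDM):
+//   CPUID.80000000h:EAX      - highest extended leaf; must be > 80000000h
+//   CPUID.80000001h:EDX[20]  - XD (execute-disable) support
+//   CPUID.(07h,0):ECX[7]     - CET shadow-stack support
+//   CPUID.01h:EDX[21]        - Debug Store (BTS) availability
+//   IA32_MISC_ENABLE[11]     - when set, BTS is unavailable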
+
+/**
+ Enable single step.
+
+**/
+VOID
+ActivateSingleStepDB (
+ VOID
+ )
+{
+ UINTN Dr6;
+
+ Dr6 = AsmReadDr6 ();
+ if ((Dr6 & DR6_SINGLE_STEP) != 0) {
+ return;
+ }
+ Dr6 |= DR6_SINGLE_STEP;
+ AsmWriteDr6 (Dr6);
+}
+
+/**
+ Enable last branch.
+
+**/
+VOID
+ActivateLBR (
+ VOID
+ )
+{
+ UINT64 DebugCtl;
+
+ DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
+ if ((DebugCtl & MSR_DEBUG_CTL_LBR) != 0) {
+ return ;
+ }
+ DebugCtl |= MSR_DEBUG_CTL_LBR;
+ AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
+}
+
+/**
+ Enable branch trace store.
+
+ @param CpuIndex The index of the processor.
+
+**/
+VOID
+ActivateBTS (
+ IN UINTN CpuIndex
+ )
+{
+ UINT64 DebugCtl;
+
+ DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
+ if ((DebugCtl & MSR_DEBUG_CTL_BTS) != 0) {
+ return ;
+ }
+
+ AsmWriteMsr64 (MSR_DS_AREA, (UINT64)(UINTN)mMsrDsArea[CpuIndex]);
+ DebugCtl |= (UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR);
+ DebugCtl &= ~((UINT64)MSR_DEBUG_CTL_BTINT);
+ AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
+}
+
+/**
+ Increase the SMI count on each SMI entry.
+
+**/
+VOID
+SmmProfileRecordSmiNum (
+ VOID
+ )
+{
+ if (mSmmProfileStart) {
+ mSmmProfileBase->NumSmis++;
+ }
+}
+
+/**
+ Initialize processor environment for SMM profile.
+
+ @param CpuIndex The index of the processor.
+
+**/
+VOID
+ActivateSmmProfile (
+ IN UINTN CpuIndex
+ )
+{
+ //
+ // Enable Single Step DB#
+ //
+ ActivateSingleStepDB ();
+
+ if (mBtsSupported) {
+ //
+ // We cannot get useful information from LER, so we have to use BTS.
+ //
+ ActivateLBR ();
+
+ //
+ // Enable BTS
+ //
+ ActivateBTS (CpuIndex);
+ }
+}
+
+/**
+ Initialize SMM profile in SMM CPU entry point.
+
+ @param[in] Cr3 The base address of the page tables to use in SMM.
+
+**/
+VOID
+InitSmmProfile (
+ UINT32 Cr3
+ )
+{
+ //
+ // Save Cr3
+ //
+ mSmmProfileCr3 = Cr3;
+
+ //
+ // Skip SMM profile initialization if feature is disabled
+ //
+ if (!FeaturePcdGet (PcdCpuSmmProfileEnable) &&
+ !HEAP_GUARD_NONSTOP_MODE &&
+ !NULL_DETECTION_NONSTOP_MODE) {
+ return;
+ }
+
+ //
+ // Initialize SmmProfile here
+ //
+ InitSmmProfileInternal ();
+
+ //
+ // Initialize profile IDT.
+ //
+ InitIdtr ();
+
+ //
+ // Tell the #PF handler to set up a subsequent #DB.
+ //
+ mSetupDebugTrap = TRUE;
+}
+
+/**
+ Update the page table to map the memory correctly in order to make the instruction
+ which caused the page fault execute successfully. It also saves the original page
+ table entry, to be restored in the single-step exception.
+
+ @param PageTable PageTable Address.
+ @param PFAddress The memory address which caused page fault exception.
+ @param CpuIndex The index of the processor.
+ @param ErrorCode The Error code of exception.
+
+**/
+VOID
+RestorePageTableBelow4G (
+ UINT64 *PageTable,
+ UINT64 PFAddress,
+ UINTN CpuIndex,
+ UINTN ErrorCode
+ )
+{
+ UINTN PTIndex;
+ UINTN PFIndex;
+ IA32_CR4 Cr4;
+ BOOLEAN Enable5LevelPaging;
+
+ Cr4.UintN = AsmReadCr4 ();
+ Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
+
+ //
+ // PML5
+ //
+ if (Enable5LevelPaging) {
+ PTIndex = (UINTN)BitFieldRead64 (PFAddress, 48, 56);
+ ASSERT (PageTable[PTIndex] != 0);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+ }
+
+ //
+ // PML4
+ //
+ if (sizeof(UINT64) == sizeof(UINTN)) {
+ PTIndex = (UINTN)BitFieldRead64 (PFAddress, 39, 47);
+ ASSERT (PageTable[PTIndex] != 0);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+ }
+
+ //
+ // PDPTE
+ //
+ PTIndex = (UINTN)BitFieldRead64 (PFAddress, 30, 38);
+ ASSERT (PageTable[PTIndex] != 0);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+
+ //
+ // PD
+ //
+ PTIndex = (UINTN)BitFieldRead64 (PFAddress, 21, 29);
+ if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
+ //
+ // Large page
+ //
+
+ //
+ // Record the old entries, which will later be restored to non-present status.
+ // Old entries cover both the memory the instruction resides in and the memory it accesses.
+ //
+ ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
+ if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
+ PFIndex = mPFEntryCount[CpuIndex];
+ mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
+ mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
+ mPFEntryCount[CpuIndex]++;
+ }
+
+ //
+ // Set new entry
+ //
+ PageTable[PTIndex] = (PFAddress & ~((1ull << 21) - 1));
+ PageTable[PTIndex] |= (UINT64)IA32_PG_PS;
+ PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
+ if ((ErrorCode & IA32_PF_EC_ID) != 0) {
+ PageTable[PTIndex] &= ~IA32_PG_NX;
+ }
+ } else {
+ //
+ // Small page
+ //
+ ASSERT (PageTable[PTIndex] != 0);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK);
+
+ //
+ // 4K PTE
+ //
+ PTIndex = (UINTN)BitFieldRead64 (PFAddress, 12, 20);
+
+ //
+ // Record the old entries, which will later be restored to non-present status.
+ // Old entries cover both the memory the instruction resides in and the memory it accesses.
+ //
+ ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
+ if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
+ PFIndex = mPFEntryCount[CpuIndex];
+ mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
+ mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
+ mPFEntryCount[CpuIndex]++;
+ }
+
+ //
+ // Set new entry
+ //
+ PageTable[PTIndex] = (PFAddress & ~((1ull << 12) - 1));
+ PageTable[PTIndex] |= (UINT64)PAGE_ATTRIBUTE_BITS;
+ if ((ErrorCode & IA32_PF_EC_ID) != 0) {
+ PageTable[PTIndex] &= ~IA32_PG_NX;
+ }
+ }
+}
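+
+// Worked example (illustrative, not part of the source): for
+// PFAddress = 0xBFE01000 the extractions above give
+//   PML4 index = 0     (bits 39..47)
+//   PDPT index = 2     (bits 30..38)
+//   PD   index = 0x1FF (bits 21..29)
+//   PT   index = 0x001 (bits 12..20, small-page case)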
+
+/**
+ Handler for Page Fault triggered by Guard page.
+
+ @param ErrorCode The Error code of exception.
+
+**/
+VOID
+GuardPagePFHandler (
+ UINTN ErrorCode
+ )
+{
+ UINT64 *PageTable;
+ UINT64 PFAddress;
+ UINT64 RestoreAddress;
+ UINTN RestorePageNumber;
+ UINTN CpuIndex;
+
+ PageTable = (UINT64 *)AsmReadCr3 ();
+ PFAddress = AsmReadCr2 ();
+ CpuIndex = GetCpuIndex ();
+
+ //
+ // A memory operation that crosses pages, like a "rep mov" instruction, would
+ // cause an infinite loop between this and the Debug Trap handler. We have to
+ // make sure that the current page and the page that follows are both in the
+ // PRESENT state.
+ //
+ RestorePageNumber = 2;
+ RestoreAddress = PFAddress;
+ while (RestorePageNumber > 0) {
+ RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);
+ RestoreAddress += EFI_PAGE_SIZE;
+ RestorePageNumber--;
+ }
+
+ //
+ // Flush TLB
+ //
+ CpuFlushTlb ();
+}
+
+/**
+ The Page fault handler to save SMM profile data.
+
+ @param Rip The RIP when exception happens.
+ @param ErrorCode The Error code of exception.
+
+**/
+VOID
+SmmProfilePFHandler (
+ UINTN Rip,
+ UINTN ErrorCode
+ )
+{
+ UINT64 *PageTable;
+ UINT64 PFAddress;
+ UINT64 RestoreAddress;
+ UINTN RestorePageNumber;
+ UINTN CpuIndex;
+ UINTN Index;
+ UINT64 InstructionAddress;
+ UINTN MaxEntryNumber;
+ UINTN CurrentEntryNumber;
+ BOOLEAN IsValidPFAddress;
+ SMM_PROFILE_ENTRY *SmmProfileEntry;
+ UINT64 SmiCommand;
+ EFI_STATUS Status;
+ UINT8 SoftSmiValue;
+ EFI_SMM_SAVE_STATE_IO_INFO IoInfo;
+
+ if (!mSmmProfileStart) {
+ //
+ // If SMM profile has not started, call the original page fault handler.
+ //
+ SmiDefaultPFHandler ();
+ return;
+ }
+
+ if (mBtsSupported) {
+ DisableBTS ();
+ }
+
+ IsValidPFAddress = FALSE;
+ PageTable = (UINT64 *)AsmReadCr3 ();
+ PFAddress = AsmReadCr2 ();
+ CpuIndex = GetCpuIndex ();
+
+ //
+ // A memory operation that crosses pages, like a "rep mov" instruction, would
+ // cause an infinite loop between this and the Debug Trap handler. We have to
+ // make sure that the current page and the page that follows are both in the
+ // PRESENT state.
+ //
+ RestorePageNumber = 2;
+ RestoreAddress = PFAddress;
+ while (RestorePageNumber > 0) {
+ if (RestoreAddress <= 0xFFFFFFFF) {
+ RestorePageTableBelow4G (PageTable, RestoreAddress, CpuIndex, ErrorCode);
+ } else {
+ RestorePageTableAbove4G (PageTable, RestoreAddress, CpuIndex, ErrorCode, &IsValidPFAddress);
+ }
+ RestoreAddress += EFI_PAGE_SIZE;
+ RestorePageNumber--;
+ }
+
+ if (!IsValidPFAddress) {
+ InstructionAddress = Rip;
+ if ((ErrorCode & IA32_PF_EC_ID) != 0 && (mBtsSupported)) {
+ //
+ // If it is instruction fetch failure, get the correct IP from BTS.
+ //
+ InstructionAddress = GetSourceFromDestinationOnBts (CpuIndex, Rip);
+ if (InstructionAddress == 0) {
+ //
+ // It indicates that the instruction which caused the page fault is not a jump
+ // instruction, so set the instruction address to the page fault address.
+ //
+ InstructionAddress = PFAddress;
+ }
+ }
+
+ //
+ // Indicate it is not software SMI
+ //
+ SmiCommand = 0xFFFFFFFFFFFFFFFFULL;
+ for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
+ Status = SmmReadSaveState(&mSmmCpu, sizeof(IoInfo), EFI_SMM_SAVE_STATE_REGISTER_IO, Index, &IoInfo);
+ if (EFI_ERROR (Status)) {
+ continue;
+ }
+ if (IoInfo.IoPort == mSmiCommandPort) {
+ //
+ // A software SMI triggered via the SMI command port has been found; get SmiCommand from the SMI command port.
+ //
+ SoftSmiValue = IoRead8 (mSmiCommandPort);
+ SmiCommand = (UINT64)SoftSmiValue;
+ break;
+ }
+ }
+
+ SmmProfileEntry = (SMM_PROFILE_ENTRY *)(UINTN)(mSmmProfileBase + 1);
+ //
+ // Check if there is already a same entry in profile data.
+ //
+ for (Index = 0; Index < (UINTN) mSmmProfileBase->CurDataEntries; Index++) {
+ if ((SmmProfileEntry[Index].ErrorCode == (UINT64)ErrorCode) &&
+ (SmmProfileEntry[Index].Address == PFAddress) &&
+ (SmmProfileEntry[Index].CpuNum == (UINT64)CpuIndex) &&
+ (SmmProfileEntry[Index].Instruction == InstructionAddress) &&
+ (SmmProfileEntry[Index].SmiCmd == SmiCommand)) {
+ //
+ // The same record already exists; no need to save it again.
+ //
+ break;
+ }
+ }
+ if (Index == mSmmProfileBase->CurDataEntries) {
+ CurrentEntryNumber = (UINTN) mSmmProfileBase->CurDataEntries;
+ MaxEntryNumber = (UINTN) mSmmProfileBase->MaxDataEntries;
+ if (FeaturePcdGet (PcdCpuSmmProfileRingBuffer)) {
+ CurrentEntryNumber = CurrentEntryNumber % MaxEntryNumber;
+ }
+ if (CurrentEntryNumber < MaxEntryNumber) {
+ //
+ // Log the new entry
+ //
+ SmmProfileEntry[CurrentEntryNumber].SmiNum = mSmmProfileBase->NumSmis;
+ SmmProfileEntry[CurrentEntryNumber].ErrorCode = (UINT64)ErrorCode;
+ SmmProfileEntry[CurrentEntryNumber].ApicId = (UINT64)GetApicId ();
+ SmmProfileEntry[CurrentEntryNumber].CpuNum = (UINT64)CpuIndex;
+ SmmProfileEntry[CurrentEntryNumber].Address = PFAddress;
+ SmmProfileEntry[CurrentEntryNumber].Instruction = InstructionAddress;
+ SmmProfileEntry[CurrentEntryNumber].SmiCmd = SmiCommand;
+ //
+ // Update current entry index and data size in the header.
+ //
+ mSmmProfileBase->CurDataEntries++;
+ mSmmProfileBase->CurDataSize = MultU64x64 (mSmmProfileBase->CurDataEntries, sizeof (SMM_PROFILE_ENTRY));
+ }
+ }
+ }
+ //
+ // Flush TLB
+ //
+ CpuFlushTlb ();
+
+ if (mBtsSupported) {
+ EnableBTS ();
+ }
+}
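+
+// Informal note: with PcdCpuSmmProfileRingBuffer set, the log index wraps as
+// CurDataEntries % MaxDataEntries, so with MaxDataEntries = 1000 the 1001st
+// distinct fault overwrites entry 0, while CurDataEntries keeps counting the
+// total number of logged faults.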
+
+/**
+ Replace the INT1 exception handler to restore the page table to the absent/execute-disable
+ state, in order to trigger the page fault again and save SMM profile data.
+
+**/
+VOID
+InitIdtr (
+ VOID
+ )
+{
+ EFI_STATUS Status;
+
+ Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_DEBUG, DebugExceptionHandler);
+ ASSERT_EFI_ERROR (Status);
+}
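+
+// Informal flow summary: SmmProfilePFHandler() temporarily maps the faulting
+// page present, the trapped instruction re-executes, and the #DB handler
+// registered above restores the entries saved in mLastPFEntryPointer/-Value
+// to their absent/execute-disable state, so the next access faults and is
+// logged again.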
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.h b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.h
new file mode 100644
index 00000000..24916d30
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.h
@@ -0,0 +1,135 @@
+/** @file
+SMM profile header file.
+
+Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef _SMM_PROFILE_H_
+#define _SMM_PROFILE_H_
+
+#include "SmmProfileInternal.h"
+
+//
+// External functions
+//
+
+/**
+ Initialize processor environment for SMM profile.
+
+ @param CpuIndex The index of the processor.
+
+**/
+VOID
+ActivateSmmProfile (
+ IN UINTN CpuIndex
+ );
+
+/**
+ Initialize SMM profile in SMM CPU entry point.
+
+ @param[in] Cr3 The base address of the page tables to use in SMM.
+
+**/
+VOID
+InitSmmProfile (
+ UINT32 Cr3
+ );
+
+/**
+ Increase the SMI count on each SMI entry.
+
+**/
+VOID
+SmmProfileRecordSmiNum (
+ VOID
+ );
+
+/**
+ The Page fault handler to save SMM profile data.
+
+ @param Rip The RIP when exception happens.
+ @param ErrorCode The Error code of exception.
+
+**/
+VOID
+SmmProfilePFHandler (
+ UINTN Rip,
+ UINTN ErrorCode
+ );
+
+/**
+ Updates the page table to make some memory ranges (like system memory) absent
+ and make some memory ranges (like MMIO) present and execute-disable. It also
+ updates 2MB pages to 4KB pages for some memory ranges.
+
+**/
+VOID
+SmmProfileStart (
+ VOID
+ );
+
+/**
+ Page fault IDT handler for SMM Profile.
+
+**/
+VOID
+EFIAPI
+PageFaultIdtHandlerSmmProfile (
+ VOID
+ );
+
+
+/**
+ Check if the features are supported by the processor.
+
+**/
+VOID
+CheckFeatureSupported (
+ VOID
+ );
+
+/**
+ Update page table according to protected memory ranges and the 4KB-page mapped memory ranges.
+
+**/
+VOID
+InitPaging (
+ VOID
+ );
+
+/**
+ Get CPU Index from APIC ID.
+
+**/
+UINTN
+GetCpuIndex (
+ VOID
+ );
+
+/**
+ Handler for Page Fault triggered by Guard page.
+
+ @param ErrorCode The Error code of exception.
+
+**/
+VOID
+GuardPagePFHandler (
+ UINTN ErrorCode
+ );
+
+//
+// The flag indicates if execute-disable is supported by processor.
+//
+extern BOOLEAN mXdSupported;
+//
+// The flag indicates if execute-disable is enabled on processor.
+//
+extern BOOLEAN mXdEnabled;
+//
+// The flag indicates if #DB will be setup in #PF handler.
+//
+extern BOOLEAN mSetupDebugTrap;
+
+#endif // _SMM_PROFILE_H_
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfileInternal.h b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfileInternal.h
new file mode 100644
index 00000000..0054da24
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfileInternal.h
@@ -0,0 +1,167 @@
+/** @file
+SMM profile internal header file.
+
+Copyright (c) 2012 - 2018, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2020, AMD Incorporated. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef _SMM_PROFILE_INTERNAL_H_
+#define _SMM_PROFILE_INTERNAL_H_
+
+#include <Protocol/SmmReadyToLock.h>
+#include <Library/UefiRuntimeServicesTableLib.h>
+#include <Library/DxeServicesTableLib.h>
+#include <Library/CpuLib.h>
+#include <Library/UefiCpuLib.h>
+#include <IndustryStandard/Acpi.h>
+
+#include "SmmProfileArch.h"
+
+//
+// Configure the SMM_PROFILE DTS region size
+//
+#define SMM_PROFILE_DTS_SIZE (4 * 1024 * 1024) // 4M
+
+#define MAX_PF_PAGE_COUNT 0x2
+
+#define PEBS_RECORD_NUMBER 0x2
+
+#define MAX_PF_ENTRY_COUNT 10
+
+//
+// This MACRO just enables the unit test for the profile.
+// Please keep it disabled.
+//
+
+#define IA32_PF_EC_ID (1u << 4)
+
+#define SMM_PROFILE_NAME L"SmmProfileData"
+
+//
+// CPU generic definition
+//
+#define CPUID1_EDX_XD_SUPPORT 0x100000
+#define MSR_EFER 0xc0000080
+#define MSR_EFER_XD 0x800
+
+#define CPUID1_EDX_BTS_AVAILABLE 0x200000
+
+#define DR6_SINGLE_STEP 0x4000
+#define RFLAG_TF 0x100
+
+#define MSR_DEBUG_CTL 0x1D9
+#define MSR_DEBUG_CTL_LBR 0x1
+#define MSR_DEBUG_CTL_TR 0x40
+#define MSR_DEBUG_CTL_BTS 0x80
+#define MSR_DEBUG_CTL_BTINT 0x100
+#define MSR_DS_AREA 0x600
+
+#define HEAP_GUARD_NONSTOP_MODE \
+ ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT6|BIT3|BIT2)) > BIT6)
+
+#define NULL_DETECTION_NONSTOP_MODE \
+ ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & (BIT6|BIT1)) > BIT6)
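+
+// Informal note: the "> BIT6" comparisons above compactly require BIT6 plus
+// at least one of the lower bits; e.g. (Mask & (BIT6|BIT1)) > BIT6 is TRUE
+// only when both BIT6 and BIT1 are set (0x42 > 0x40), and FALSE for BIT6
+// alone (0x40) or BIT1 alone (0x02).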
+
+typedef struct {
+ EFI_PHYSICAL_ADDRESS Base;
+ EFI_PHYSICAL_ADDRESS Top;
+} MEMORY_RANGE;
+
+typedef struct {
+ MEMORY_RANGE Range;
+ BOOLEAN Present;
+ BOOLEAN Nx;
+} MEMORY_PROTECTION_RANGE;
+
+typedef struct {
+ UINT64 HeaderSize;
+ UINT64 MaxDataEntries;
+ UINT64 MaxDataSize;
+ UINT64 CurDataEntries;
+ UINT64 CurDataSize;
+ UINT64 TsegStart;
+ UINT64 TsegSize;
+ UINT64 NumSmis;
+ UINT64 NumCpus;
+} SMM_PROFILE_HEADER;
+
+typedef struct {
+ UINT64 SmiNum;
+ UINT64 CpuNum;
+ UINT64 ApicId;
+ UINT64 ErrorCode;
+ UINT64 Instruction;
+ UINT64 Address;
+ UINT64 SmiCmd;
+} SMM_PROFILE_ENTRY;
+
+extern SMM_S3_RESUME_STATE *mSmmS3ResumeState;
+extern UINTN gSmiExceptionHandlers[];
+extern BOOLEAN mXdSupported;
+X86_ASSEMBLY_PATCH_LABEL gPatchXdSupported;
+X86_ASSEMBLY_PATCH_LABEL gPatchMsrIa32MiscEnableSupported;
+extern UINTN *mPFEntryCount;
+extern UINT64 (*mLastPFEntryValue)[MAX_PF_ENTRY_COUNT];
+extern UINT64 *(*mLastPFEntryPointer)[MAX_PF_ENTRY_COUNT];
+
+//
+// Internal functions
+//
+
+/**
+ Update IDT table to replace page fault handler and INT 1 handler.
+
+**/
+VOID
+InitIdtr (
+ VOID
+ );
+
+/**
+ Check if the memory address will be mapped by a 4KB page.
+
+ @param Address The memory address.
+
+**/
+BOOLEAN
+IsAddressSplit (
+ IN EFI_PHYSICAL_ADDRESS Address
+ );
+
+/**
+ Check if the memory address is valid and whether it should be execute-disabled.
+
+ @param Address The memory address.
+ @param Nx On return, the flag indicates if the memory is execute-disable.
+
+**/
+BOOLEAN
+IsAddressValid (
+ IN EFI_PHYSICAL_ADDRESS Address,
+ IN BOOLEAN *Nx
+ );
+
+/**
+ Page Fault handler for SMM use.
+
+**/
+VOID
+SmiDefaultPFHandler (
+ VOID
+ );
+
+/**
+ Clear TF in FLAGS.
+
+ @param SystemContext A pointer to the processor context when
+ the interrupt occurred on the processor.
+
+**/
+VOID
+ClearTrapFlag (
+ IN OUT EFI_SYSTEM_CONTEXT SystemContext
+ );
+
+#endif // _SMM_PROFILE_INTERNAL_H_
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmramSaveState.c b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmramSaveState.c
new file mode 100644
index 00000000..9fc5a498
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SmramSaveState.c
@@ -0,0 +1,742 @@
+/** @file
+Provides services to access SMRAM Save State Map
+
+Copyright (c) 2010 - 2019, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <PiSmm.h>
+
+#include <Library/SmmCpuFeaturesLib.h>
+
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/SmmServicesTableLib.h>
+#include <Library/DebugLib.h>
+
+#include "PiSmmCpuDxeSmm.h"
+
+typedef struct {
+ UINT64 Signature; // Offset 0x00
+ UINT16 Reserved1; // Offset 0x08
+ UINT16 Reserved2; // Offset 0x0A
+ UINT16 Reserved3; // Offset 0x0C
+ UINT16 SmmCs; // Offset 0x0E
+ UINT16 SmmDs; // Offset 0x10
+ UINT16 SmmSs; // Offset 0x12
+ UINT16 SmmOtherSegment; // Offset 0x14
+ UINT16 Reserved4; // Offset 0x16
+ UINT64 Reserved5; // Offset 0x18
+ UINT64 Reserved6; // Offset 0x20
+ UINT64 Reserved7; // Offset 0x28
+ UINT64 SmmGdtPtr; // Offset 0x30
+ UINT32 SmmGdtSize; // Offset 0x38
+ UINT32 Reserved8; // Offset 0x3C
+ UINT64 Reserved9; // Offset 0x40
+ UINT64 Reserved10; // Offset 0x48
+ UINT16 Reserved11; // Offset 0x50
+ UINT16 Reserved12; // Offset 0x52
+ UINT32 Reserved13; // Offset 0x54
+ UINT64 Reserved14; // Offset 0x58
+} PROCESSOR_SMM_DESCRIPTOR;
+
+extern CONST PROCESSOR_SMM_DESCRIPTOR gcPsd;
+
+//
+// EFER register LMA bit
+//
+#define LMA BIT10
+
+///
+/// Macro used to simplify the lookup table entries of type CPU_SMM_SAVE_STATE_LOOKUP_ENTRY
+///
+#define SMM_CPU_OFFSET(Field) OFFSET_OF (SMRAM_SAVE_STATE_MAP, Field)
+
+///
+/// Macro used to simplify the lookup table entries of type CPU_SMM_SAVE_STATE_REGISTER_RANGE
+///
+#define SMM_REGISTER_RANGE(Start, End) { Start, End, End - Start + 1 }
+
+///
+/// Structure used to describe a range of registers
+///
+typedef struct {
+ EFI_SMM_SAVE_STATE_REGISTER Start;
+ EFI_SMM_SAVE_STATE_REGISTER End;
+ UINTN Length;
+} CPU_SMM_SAVE_STATE_REGISTER_RANGE;
+
+///
+/// Structure used to build a lookup table to retrieve the widths and offsets
+/// associated with each supported EFI_SMM_SAVE_STATE_REGISTER value
+///
+
+#define SMM_SAVE_STATE_REGISTER_SMMREVID_INDEX 1
+#define SMM_SAVE_STATE_REGISTER_IOMISC_INDEX 2
+#define SMM_SAVE_STATE_REGISTER_IOMEMADDR_INDEX 3
+#define SMM_SAVE_STATE_REGISTER_MAX_INDEX 4
+
+typedef struct {
+ UINT8 Width32;
+ UINT8 Width64;
+ UINT16 Offset32;
+ UINT16 Offset64Lo;
+ UINT16 Offset64Hi;
+ BOOLEAN Writeable;
+} CPU_SMM_SAVE_STATE_LOOKUP_ENTRY;
+
+///
+/// Structure used to build a lookup table for the IOMisc width information
+///
+typedef struct {
+ UINT8 Width;
+ EFI_SMM_SAVE_STATE_IO_WIDTH IoWidth;
+} CPU_SMM_SAVE_STATE_IO_WIDTH;
+
+///
+/// Variables from SMI Handler
+///
+X86_ASSEMBLY_PATCH_LABEL gPatchSmbase;
+X86_ASSEMBLY_PATCH_LABEL gPatchSmiStack;
+X86_ASSEMBLY_PATCH_LABEL gPatchSmiCr3;
+extern volatile UINT8 gcSmiHandlerTemplate[];
+extern CONST UINT16 gcSmiHandlerSize;
+
+//
+// Variables used by SMI Handler
+//
+IA32_DESCRIPTOR gSmiHandlerIdtr;
+
+///
+/// Table used by GetRegisterIndex() to convert an EFI_SMM_SAVE_STATE_REGISTER
+/// value to an index into a table of type CPU_SMM_SAVE_STATE_LOOKUP_ENTRY
+///
+CONST CPU_SMM_SAVE_STATE_REGISTER_RANGE mSmmCpuRegisterRanges[] = {
+ SMM_REGISTER_RANGE (EFI_SMM_SAVE_STATE_REGISTER_GDTBASE, EFI_SMM_SAVE_STATE_REGISTER_LDTINFO),
+ SMM_REGISTER_RANGE (EFI_SMM_SAVE_STATE_REGISTER_ES, EFI_SMM_SAVE_STATE_REGISTER_RIP),
+ SMM_REGISTER_RANGE (EFI_SMM_SAVE_STATE_REGISTER_RFLAGS, EFI_SMM_SAVE_STATE_REGISTER_CR4),
+ { (EFI_SMM_SAVE_STATE_REGISTER)0, (EFI_SMM_SAVE_STATE_REGISTER)0, 0 }
+};
+
+///
+/// Lookup table used to retrieve the widths and offsets associated with each
+/// supported EFI_SMM_SAVE_STATE_REGISTER value
+///
+CONST CPU_SMM_SAVE_STATE_LOOKUP_ENTRY mSmmCpuWidthOffset[] = {
+ {0, 0, 0, 0, 0, FALSE}, // Reserved
+
+ //
+ // Internally defined CPU Save State Registers. Not defined in PI SMM CPU Protocol.
+ //
+ {4, 4, SMM_CPU_OFFSET (x86.SMMRevId) , SMM_CPU_OFFSET (x64.SMMRevId) , 0 , FALSE}, // SMM_SAVE_STATE_REGISTER_SMMREVID_INDEX = 1
+ {4, 4, SMM_CPU_OFFSET (x86.IOMisc) , SMM_CPU_OFFSET (x64.IOMisc) , 0 , FALSE}, // SMM_SAVE_STATE_REGISTER_IOMISC_INDEX = 2
+ {4, 8, SMM_CPU_OFFSET (x86.IOMemAddr) , SMM_CPU_OFFSET (x64.IOMemAddr) , SMM_CPU_OFFSET (x64.IOMemAddr) + 4, FALSE}, // SMM_SAVE_STATE_REGISTER_IOMEMADDR_INDEX = 3
+
+ //
+ // CPU Save State registers defined in PI SMM CPU Protocol.
+ //
+ {0, 8, 0 , SMM_CPU_OFFSET (x64.GdtBaseLoDword) , SMM_CPU_OFFSET (x64.GdtBaseHiDword), FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_GDTBASE = 4
+ {0, 8, 0 , SMM_CPU_OFFSET (x64.IdtBaseLoDword) , SMM_CPU_OFFSET (x64.IdtBaseHiDword), FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_IDTBASE = 5
+ {0, 8, 0 , SMM_CPU_OFFSET (x64.LdtBaseLoDword) , SMM_CPU_OFFSET (x64.LdtBaseHiDword), FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_LDTBASE = 6
+ {0, 0, 0 , 0 , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_GDTLIMIT = 7
+ {0, 0, 0 , 0 , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_IDTLIMIT = 8
+ {0, 0, 0 , 0 , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_LDTLIMIT = 9
+ {0, 0, 0 , 0 , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_LDTINFO = 10
+
+ {4, 4, SMM_CPU_OFFSET (x86._ES) , SMM_CPU_OFFSET (x64._ES) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_ES = 20
+ {4, 4, SMM_CPU_OFFSET (x86._CS) , SMM_CPU_OFFSET (x64._CS) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_CS = 21
+ {4, 4, SMM_CPU_OFFSET (x86._SS) , SMM_CPU_OFFSET (x64._SS) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_SS = 22
+ {4, 4, SMM_CPU_OFFSET (x86._DS) , SMM_CPU_OFFSET (x64._DS) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_DS = 23
+ {4, 4, SMM_CPU_OFFSET (x86._FS) , SMM_CPU_OFFSET (x64._FS) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_FS = 24
+ {4, 4, SMM_CPU_OFFSET (x86._GS) , SMM_CPU_OFFSET (x64._GS) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_GS = 25
+ {0, 4, 0 , SMM_CPU_OFFSET (x64._LDTR) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_LDTR_SEL = 26
+ {4, 4, SMM_CPU_OFFSET (x86._TR) , SMM_CPU_OFFSET (x64._TR) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_TR_SEL = 27
+ {4, 8, SMM_CPU_OFFSET (x86._DR7) , SMM_CPU_OFFSET (x64._DR7) , SMM_CPU_OFFSET (x64._DR7) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_DR7 = 28
+ {4, 8, SMM_CPU_OFFSET (x86._DR6) , SMM_CPU_OFFSET (x64._DR6) , SMM_CPU_OFFSET (x64._DR6) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_DR6 = 29
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R8) , SMM_CPU_OFFSET (x64._R8) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R8 = 30
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R9) , SMM_CPU_OFFSET (x64._R9) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R9 = 31
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R10) , SMM_CPU_OFFSET (x64._R10) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R10 = 32
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R11) , SMM_CPU_OFFSET (x64._R11) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R11 = 33
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R12) , SMM_CPU_OFFSET (x64._R12) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R12 = 34
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R13) , SMM_CPU_OFFSET (x64._R13) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R13 = 35
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R14) , SMM_CPU_OFFSET (x64._R14) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R14 = 36
+ {0, 8, 0 , SMM_CPU_OFFSET (x64._R15) , SMM_CPU_OFFSET (x64._R15) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_R15 = 37
+ {4, 8, SMM_CPU_OFFSET (x86._EAX) , SMM_CPU_OFFSET (x64._RAX) , SMM_CPU_OFFSET (x64._RAX) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RAX = 38
+ {4, 8, SMM_CPU_OFFSET (x86._EBX) , SMM_CPU_OFFSET (x64._RBX) , SMM_CPU_OFFSET (x64._RBX) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RBX = 39
+ {4, 8, SMM_CPU_OFFSET (x86._ECX) , SMM_CPU_OFFSET (x64._RCX) , SMM_CPU_OFFSET (x64._RCX) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RCX = 40
+ {4, 8, SMM_CPU_OFFSET (x86._EDX) , SMM_CPU_OFFSET (x64._RDX) , SMM_CPU_OFFSET (x64._RDX) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RDX = 41
+ {4, 8, SMM_CPU_OFFSET (x86._ESP) , SMM_CPU_OFFSET (x64._RSP) , SMM_CPU_OFFSET (x64._RSP) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RSP = 42
+ {4, 8, SMM_CPU_OFFSET (x86._EBP) , SMM_CPU_OFFSET (x64._RBP) , SMM_CPU_OFFSET (x64._RBP) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RBP = 43
+ {4, 8, SMM_CPU_OFFSET (x86._ESI) , SMM_CPU_OFFSET (x64._RSI) , SMM_CPU_OFFSET (x64._RSI) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RSI = 44
+ {4, 8, SMM_CPU_OFFSET (x86._EDI) , SMM_CPU_OFFSET (x64._RDI) , SMM_CPU_OFFSET (x64._RDI) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RDI = 45
+ {4, 8, SMM_CPU_OFFSET (x86._EIP) , SMM_CPU_OFFSET (x64._RIP) , SMM_CPU_OFFSET (x64._RIP) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RIP = 46
+
+ {4, 8, SMM_CPU_OFFSET (x86._EFLAGS) , SMM_CPU_OFFSET (x64._RFLAGS) , SMM_CPU_OFFSET (x64._RFLAGS) + 4, TRUE }, // EFI_SMM_SAVE_STATE_REGISTER_RFLAGS = 51
+ {4, 8, SMM_CPU_OFFSET (x86._CR0) , SMM_CPU_OFFSET (x64._CR0) , SMM_CPU_OFFSET (x64._CR0) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_CR0 = 52
+ {4, 8, SMM_CPU_OFFSET (x86._CR3) , SMM_CPU_OFFSET (x64._CR3) , SMM_CPU_OFFSET (x64._CR3) + 4, FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_CR3 = 53
+ {0, 4, 0 , SMM_CPU_OFFSET (x64._CR4) , 0 , FALSE}, // EFI_SMM_SAVE_STATE_REGISTER_CR4 = 54
+};
+
+///
+/// Lookup table for the IOMisc width information
+///
+CONST CPU_SMM_SAVE_STATE_IO_WIDTH mSmmCpuIoWidth[] = {
+ { 0, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8 }, // Undefined = 0
+ { 1, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8 }, // SMM_IO_LENGTH_BYTE = 1
+ { 2, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT16 }, // SMM_IO_LENGTH_WORD = 2
+ { 0, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8 }, // Undefined = 3
+ { 4, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT32 }, // SMM_IO_LENGTH_DWORD = 4
+ { 0, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8 }, // Undefined = 5
+ { 0, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8 }, // Undefined = 6
+ { 0, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8 } // Undefined = 7
+};
+
+///
+/// Lookup table for the IOMisc type information
+///
+CONST EFI_SMM_SAVE_STATE_IO_TYPE mSmmCpuIoType[] = {
+ EFI_SMM_SAVE_STATE_IO_TYPE_OUTPUT, // SMM_IO_TYPE_OUT_DX = 0
+ EFI_SMM_SAVE_STATE_IO_TYPE_INPUT, // SMM_IO_TYPE_IN_DX = 1
+ EFI_SMM_SAVE_STATE_IO_TYPE_STRING, // SMM_IO_TYPE_OUTS = 2
+ EFI_SMM_SAVE_STATE_IO_TYPE_STRING, // SMM_IO_TYPE_INS = 3
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0, // Undefined = 4
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0, // Undefined = 5
+ EFI_SMM_SAVE_STATE_IO_TYPE_REP_PREFIX, // SMM_IO_TYPE_REP_OUTS = 6
+ EFI_SMM_SAVE_STATE_IO_TYPE_REP_PREFIX, // SMM_IO_TYPE_REP_INS = 7
+ EFI_SMM_SAVE_STATE_IO_TYPE_OUTPUT, // SMM_IO_TYPE_OUT_IMMEDIATE = 8
+ EFI_SMM_SAVE_STATE_IO_TYPE_INPUT, // SMM_IO_TYPE_IN_IMMEDIATE = 9
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0, // Undefined = 10
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0, // Undefined = 11
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0, // Undefined = 12
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0, // Undefined = 13
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0, // Undefined = 14
+ (EFI_SMM_SAVE_STATE_IO_TYPE)0 // Undefined = 15
+};
+
+///
+/// The mode of the CPU at the time an SMI occurs
+///
+UINT8 mSmmSaveStateRegisterLma;
+
+/**
+ Convert an EFI_SMM_SAVE_STATE_REGISTER value to an index into mSmmCpuWidthOffset[].
+
+ @param Register Specifies the CPU register to read from the save state.
+
+ @retval 0 Register is not valid
+ @retval >0 Index into mSmmCpuWidthOffset[] associated with Register
+
+**/
+UINTN
+GetRegisterIndex (
+ IN EFI_SMM_SAVE_STATE_REGISTER Register
+ )
+{
+ UINTN Index;
+ UINTN Offset;
+
+ for (Index = 0, Offset = SMM_SAVE_STATE_REGISTER_MAX_INDEX; mSmmCpuRegisterRanges[Index].Length != 0; Index++) {
+ if (Register >= mSmmCpuRegisterRanges[Index].Start && Register <= mSmmCpuRegisterRanges[Index].End) {
+ return Register - mSmmCpuRegisterRanges[Index].Start + Offset;
+ }
+ Offset += mSmmCpuRegisterRanges[Index].Length;
+ }
+ return 0;
+}
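+
+// Worked example (illustrative, not part of the source):
+// EFI_SMM_SAVE_STATE_REGISTER_ES (20) misses the first range
+// (GDTBASE..LDTINFO, length 7), so Offset advances from 4 to 11; it hits the
+// second range (ES..RIP), so the function returns 20 - 20 + 11 = 11, the ES
+// row of mSmmCpuWidthOffset[].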
+
+/**
+ Read a CPU Save State register on the target processor.
+
+ This function abstracts whether the CPU Save State register is in the
+ IA32 CPU Save State Map or the X64 CPU Save State Map.
+
+ This function supports reading a CPU Save State register in SMBase relocation handler.
+
+ @param[in] CpuIndex Specifies the zero-based index of the CPU save state.
+ @param[in] RegisterIndex Index into mSmmCpuWidthOffset[] look up table.
+ @param[in] Width The number of bytes to read from the CPU save state.
+ @param[out] Buffer Upon return, this holds the CPU register value read from the save state.
+
+ @retval EFI_SUCCESS The register was read from Save State.
+ @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor.
+ @retval EFI_INVALID_PARAMETER Width is larger than the width of the register.
+
+**/
+EFI_STATUS
+ReadSaveStateRegisterByIndex (
+ IN UINTN CpuIndex,
+ IN UINTN RegisterIndex,
+ IN UINTN Width,
+ OUT VOID *Buffer
+ )
+{
+ SMRAM_SAVE_STATE_MAP *CpuSaveState;
+
+ if (RegisterIndex == 0) {
+ return EFI_NOT_FOUND;
+ }
+
+ CpuSaveState = gSmst->CpuSaveState[CpuIndex];
+
+ if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
+ //
+ // If 32-bit mode width is zero, then the specified register can not be accessed
+ //
+ if (mSmmCpuWidthOffset[RegisterIndex].Width32 == 0) {
+ return EFI_NOT_FOUND;
+ }
+
+ //
+ // If Width is bigger than the 32-bit mode width, then the specified register can not be accessed
+ //
+ if (Width > mSmmCpuWidthOffset[RegisterIndex].Width32) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ //
+ // Write return buffer
+ //
+ ASSERT(CpuSaveState != NULL);
+ CopyMem(Buffer, (UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset32, Width);
+ } else {
+ //
+ // If 64-bit mode width is zero, then the specified register can not be accessed
+ //
+ if (mSmmCpuWidthOffset[RegisterIndex].Width64 == 0) {
+ return EFI_NOT_FOUND;
+ }
+
+ //
+ // If Width is bigger than the 64-bit mode width, then the specified register can not be accessed
+ //
+ if (Width > mSmmCpuWidthOffset[RegisterIndex].Width64) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ //
+ // Write lower 32-bits of return buffer
+ //
+ CopyMem(Buffer, (UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Lo, MIN(4, Width));
+ if (Width >= 4) {
+ //
+ // Write upper 32-bits of return buffer
+ //
+ CopyMem((UINT8 *)Buffer + 4, (UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Hi, Width - 4);
+ }
+ }
+ return EFI_SUCCESS;
+}
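+
+// Informal note: in the 64-bit save state map a register may be stored as two
+// 32-bit halves; e.g. an 8-byte read of RAX copies 4 bytes from Offset64Lo
+// into Buffer[0..3] and 4 bytes from Offset64Hi into Buffer[4..7].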
+
+/**
+ Read a CPU Save State register on the target processor.
+
+ This function abstracts whether the CPU Save State register is in the
+ IA32 CPU Save State Map or the X64 CPU Save State Map.
+
+ This function supports reading a CPU Save State register in SMBase relocation handler.
+
+ @param[in] CpuIndex Specifies the zero-based index of the CPU save state.
+ @param[in] Register The EFI_SMM_SAVE_STATE_REGISTER value to read.
+ @param[in] Width The number of bytes to read from the CPU save state.
+ @param[out] Buffer Upon return, this holds the CPU register value read from the save state.
+
+ @retval EFI_SUCCESS The register was read from Save State.
+ @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor.
+ @retval EFI_INVALID_PARAMETER Buffer is NULL, or Width does not meet requirement per Register type.
+
+**/
+EFI_STATUS
+EFIAPI
+ReadSaveStateRegister (
+ IN UINTN CpuIndex,
+ IN EFI_SMM_SAVE_STATE_REGISTER Register,
+ IN UINTN Width,
+ OUT VOID *Buffer
+ )
+{
+ UINT32 SmmRevId;
+ SMRAM_SAVE_STATE_IOMISC IoMisc;
+ EFI_SMM_SAVE_STATE_IO_INFO *IoInfo;
+
+ //
+ // Check for special EFI_SMM_SAVE_STATE_REGISTER_LMA
+ //
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_LMA) {
+ //
+ // Only byte access is supported for this register
+ //
+ if (Width != 1) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ *(UINT8 *)Buffer = mSmmSaveStateRegisterLma;
+
+ return EFI_SUCCESS;
+ }
+
+ //
+ // Check for special EFI_SMM_SAVE_STATE_REGISTER_IO
+ //
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_IO) {
+ //
+ // Get SMM Revision ID
+ //
+ ReadSaveStateRegisterByIndex (CpuIndex, SMM_SAVE_STATE_REGISTER_SMMREVID_INDEX, sizeof(SmmRevId), &SmmRevId);
+
+ //
+ // See if the CPU supports the IOMisc register in the save state
+ //
+ if (SmmRevId < SMRAM_SAVE_STATE_MIN_REV_ID_IOMISC) {
+ return EFI_NOT_FOUND;
+ }
+
+ //
+ // Get the IOMisc register value
+ //
+ ReadSaveStateRegisterByIndex (CpuIndex, SMM_SAVE_STATE_REGISTER_IOMISC_INDEX, sizeof(IoMisc.Uint32), &IoMisc.Uint32);
+
+ //
+ // Check for the SMI_FLAG in IOMisc
+ //
+ if (IoMisc.Bits.SmiFlag == 0) {
+ return EFI_NOT_FOUND;
+ }
+
+ //
+ // Only support IN/OUT, but not INS/OUTS/REP INS/REP OUTS.
+ //
+ if ((mSmmCpuIoType[IoMisc.Bits.Type] != EFI_SMM_SAVE_STATE_IO_TYPE_INPUT) &&
+ (mSmmCpuIoType[IoMisc.Bits.Type] != EFI_SMM_SAVE_STATE_IO_TYPE_OUTPUT)) {
+ return EFI_NOT_FOUND;
+ }
+
+ //
+ // Compute index for the I/O Length and I/O Type lookup tables
+ //
+ if (mSmmCpuIoWidth[IoMisc.Bits.Length].Width == 0 || mSmmCpuIoType[IoMisc.Bits.Type] == 0) {
+ return EFI_NOT_FOUND;
+ }
+
+ //
+ // Make sure the incoming buffer is large enough to hold IoInfo before accessing
+ //
+ if (Width < sizeof (EFI_SMM_SAVE_STATE_IO_INFO)) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ //
+ // Zero the IoInfo structure that will be returned in Buffer
+ //
+ IoInfo = (EFI_SMM_SAVE_STATE_IO_INFO *)Buffer;
+ ZeroMem (IoInfo, sizeof(EFI_SMM_SAVE_STATE_IO_INFO));
+
+ //
+ // Use lookup tables to help fill in all the fields of the IoInfo structure
+ //
+ IoInfo->IoPort = (UINT16)IoMisc.Bits.Port;
+ IoInfo->IoWidth = mSmmCpuIoWidth[IoMisc.Bits.Length].IoWidth;
+ IoInfo->IoType = mSmmCpuIoType[IoMisc.Bits.Type];
+ ReadSaveStateRegister (CpuIndex, EFI_SMM_SAVE_STATE_REGISTER_RAX, mSmmCpuIoWidth[IoMisc.Bits.Length].Width, &IoInfo->IoData);
+ return EFI_SUCCESS;
+ }
+
+ //
+ // Convert Register to a register lookup table index
+ //
+ return ReadSaveStateRegisterByIndex (CpuIndex, GetRegisterIndex (Register), Width, Buffer);
+}
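+
+// Usage sketch (illustrative, a hypothetical caller; not part of the source):
+//   EFI_SMM_SAVE_STATE_IO_INFO IoInfo;
+//   Status = ReadSaveStateRegister (0, EFI_SMM_SAVE_STATE_REGISTER_IO,
+//                                   sizeof (IoInfo), &IoInfo);
+//   // EFI_NOT_FOUND here means the last SMI was not a synchronous I/O SMI.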
+
+/**
+ Write value to a CPU Save State register on the target processor.
+
+ This function abstracts whether the CPU Save State register is in the
+ IA32 CPU Save State Map or the X64 CPU Save State Map.
+
+ This function supports writing a CPU Save State register in SMBase relocation handler.
+
+ @param[in] CpuIndex Specifies the zero-based index of the CPU save state.
+ @param[in] Register The EFI_SMM_SAVE_STATE_REGISTER value to write.
+ @param[in] Width The number of bytes to write to the CPU save state.
+ @param[in] Buffer Upon entry, this holds the new CPU register value.
+
+ @retval EFI_SUCCESS The register was written to Save State.
+ @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor.
+ @retval EFI_INVALID_PARAMETER CpuIndex or Width is not correct.
+
+**/
+EFI_STATUS
+EFIAPI
+WriteSaveStateRegister (
+ IN UINTN CpuIndex,
+ IN EFI_SMM_SAVE_STATE_REGISTER Register,
+ IN UINTN Width,
+ IN CONST VOID *Buffer
+ )
+{
+ UINTN RegisterIndex;
+ SMRAM_SAVE_STATE_MAP *CpuSaveState;
+
+ //
+ // Writes to EFI_SMM_SAVE_STATE_REGISTER_LMA are ignored
+ //
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_LMA) {
+ return EFI_SUCCESS;
+ }
+
+ //
+ // Writes to EFI_SMM_SAVE_STATE_REGISTER_IO are not supported
+ //
+ if (Register == EFI_SMM_SAVE_STATE_REGISTER_IO) {
+ return EFI_NOT_FOUND;
+ }
+
+ //
+ // Convert Register to a register lookup table index
+ //
+ RegisterIndex = GetRegisterIndex (Register);
+ if (RegisterIndex == 0) {
+ return EFI_NOT_FOUND;
+ }
+
+ CpuSaveState = gSmst->CpuSaveState[CpuIndex];
+
+ //
+ // Do not write non-writable SaveState, because it will cause exception.
+ //
+ if (!mSmmCpuWidthOffset[RegisterIndex].Writeable) {
+ return EFI_UNSUPPORTED;
+ }
+
+ //
+ // Check CPU mode
+ //
+ if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
+ //
+ // If 32-bit mode width is zero, then the specified register can not be accessed
+ //
+ if (mSmmCpuWidthOffset[RegisterIndex].Width32 == 0) {
+ return EFI_NOT_FOUND;
+ }
+
+ //
+ // If Width is bigger than the 32-bit mode width, then the specified register can not be accessed
+ //
+ if (Width > mSmmCpuWidthOffset[RegisterIndex].Width32) {
+ return EFI_INVALID_PARAMETER;
+ }
+ //
+ // Write SMM State register
+ //
+ ASSERT (CpuSaveState != NULL);
+ CopyMem((UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset32, Buffer, Width);
+ } else {
+ //
+ // If 64-bit mode width is zero, then the specified register can not be accessed
+ //
+ if (mSmmCpuWidthOffset[RegisterIndex].Width64 == 0) {
+ return EFI_NOT_FOUND;
+ }
+
+ //
+ // If Width is bigger than the 64-bit mode width, then the specified register can not be accessed
+ //
+ if (Width > mSmmCpuWidthOffset[RegisterIndex].Width64) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ //
+ // Write lower 32-bits of SMM State register
+ //
+ CopyMem((UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Lo, Buffer, MIN (4, Width));
+ if (Width >= 4) {
+ //
+ // Write upper 32-bits of SMM State register
+ //
+ CopyMem((UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Hi, (UINT8 *)Buffer + 4, Width - 4);
+ }
+ }
+ return EFI_SUCCESS;
+}
+
+/**
+ Hook the code executed immediately after an RSM instruction on the currently
+ executing CPU. The mode of code executed immediately after RSM must be
+ detected, and the appropriate hook must be selected. Always clear the auto
+ HALT restart flag if it is set.
+
+ @param[in] CpuIndex The processor index for the currently
+ executing CPU.
+ @param[in] CpuState Pointer to SMRAM Save State Map for the
+ currently executing CPU.
+ @param[in] NewInstructionPointer32 Instruction pointer to use if resuming to
+ 32-bit mode from 64-bit SMM.
+ @param[in] NewInstructionPointer Instruction pointer to use if resuming to
+ same mode as SMM.
+
+ @retval The value of the original instruction pointer before it was hooked.
+
+**/
+UINT64
+EFIAPI
+HookReturnFromSmm (
+ IN UINTN CpuIndex,
+ SMRAM_SAVE_STATE_MAP *CpuState,
+ UINT64 NewInstructionPointer32,
+ UINT64 NewInstructionPointer
+ )
+{
+ UINT64 OriginalInstructionPointer;
+
+ OriginalInstructionPointer = SmmCpuFeaturesHookReturnFromSmm (
+ CpuIndex,
+ CpuState,
+ NewInstructionPointer32,
+ NewInstructionPointer
+ );
+ if (OriginalInstructionPointer != 0) {
+ return OriginalInstructionPointer;
+ }
+
+ if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
+ OriginalInstructionPointer = (UINT64)CpuState->x86._EIP;
+ CpuState->x86._EIP = (UINT32)NewInstructionPointer;
+ //
+ // Clear the auto HALT restart flag so the RSM instruction returns
+ // program control to the instruction following the HLT instruction.
+ //
+ if ((CpuState->x86.AutoHALTRestart & BIT0) != 0) {
+ CpuState->x86.AutoHALTRestart &= ~BIT0;
+ }
+ } else {
+ OriginalInstructionPointer = CpuState->x64._RIP;
+ if ((CpuState->x64.IA32_EFER & LMA) == 0) {
+ CpuState->x64._RIP = (UINT32)NewInstructionPointer32;
+ } else {
+ CpuState->x64._RIP = (UINT32)NewInstructionPointer;
+ }
+ //
+ // Clear the auto HALT restart flag so the RSM instruction returns
+ // program control to the instruction following the HLT instruction.
+ //
+ if ((CpuState->x64.AutoHALTRestart & BIT0) != 0) {
+ CpuState->x64.AutoHALTRestart &= ~BIT0;
+ }
+ }
+ return OriginalInstructionPointer;
+}
+
+/**
+ Get the size of the SMI Handler in bytes.
+
+ @retval The size, in bytes, of the SMI Handler.
+
+**/
+UINTN
+EFIAPI
+GetSmiHandlerSize (
+ VOID
+ )
+{
+ UINTN Size;
+
+ Size = SmmCpuFeaturesGetSmiHandlerSize ();
+ if (Size != 0) {
+ return Size;
+ }
+ return gcSmiHandlerSize;
+}
+
+/**
+ Install the SMI handler for the CPU specified by CpuIndex. This function
+ is called by the CPU that was elected as monarch during System Management
+ Mode initialization.
+
+ @param[in] CpuIndex The index of the CPU to install the custom SMI handler.
+ The value must be between 0 and the NumberOfCpus field
+ in the System Management System Table (SMST).
+ @param[in] SmBase The SMBASE address for the CPU specified by CpuIndex.
+ @param[in] SmiStack The stack to use when an SMI is processed by the
+ CPU specified by CpuIndex.
+ @param[in] StackSize The size, in bytes, of the stack used when an SMI is
+ processed by the CPU specified by CpuIndex.
+ @param[in] GdtBase The base address of the GDT to use when an SMI is
+ processed by the CPU specified by CpuIndex.
+ @param[in] GdtSize The size, in bytes, of the GDT used when an SMI is
+ processed by the CPU specified by CpuIndex.
+ @param[in] IdtBase The base address of the IDT to use when an SMI is
+ processed by the CPU specified by CpuIndex.
+ @param[in] IdtSize The size, in bytes, of the IDT used when an SMI is
+ processed by the CPU specified by CpuIndex.
+ @param[in] Cr3 The base address of the page tables to use when an SMI
+ is processed by the CPU specified by CpuIndex.
+**/
+VOID
+EFIAPI
+InstallSmiHandler (
+ IN UINTN CpuIndex,
+ IN UINT32 SmBase,
+ IN VOID *SmiStack,
+ IN UINTN StackSize,
+ IN UINTN GdtBase,
+ IN UINTN GdtSize,
+ IN UINTN IdtBase,
+ IN UINTN IdtSize,
+ IN UINT32 Cr3
+ )
+{
+ PROCESSOR_SMM_DESCRIPTOR *Psd;
+ UINT32 CpuSmiStack;
+
+ //
+ // Initialize PROCESSOR_SMM_DESCRIPTOR
+ //
+ Psd = (PROCESSOR_SMM_DESCRIPTOR *)(VOID *)((UINTN)SmBase + SMM_PSD_OFFSET);
+ CopyMem (Psd, &gcPsd, sizeof (gcPsd));
+ Psd->SmmGdtPtr = (UINT64)GdtBase;
+ Psd->SmmGdtSize = (UINT32)GdtSize;
+
+ if (SmmCpuFeaturesGetSmiHandlerSize () != 0) {
+ //
+ // Install SMI handler provided by library
+ //
+ SmmCpuFeaturesInstallSmiHandler (
+ CpuIndex,
+ SmBase,
+ SmiStack,
+ StackSize,
+ GdtBase,
+ GdtSize,
+ IdtBase,
+ IdtSize,
+ Cr3
+ );
+ return;
+ }
+
+ InitShadowStack (CpuIndex, (VOID *)((UINTN)SmiStack + StackSize));
+
+ //
+ // Initialize values in template before copy
+ //
+ CpuSmiStack = (UINT32)((UINTN)SmiStack + StackSize - sizeof (UINTN));
+ PatchInstructionX86 (gPatchSmiStack, CpuSmiStack, 4);
+ PatchInstructionX86 (gPatchSmiCr3, Cr3, 4);
+ PatchInstructionX86 (gPatchSmbase, SmBase, 4);
+ gSmiHandlerIdtr.Base = IdtBase;
+ gSmiHandlerIdtr.Limit = (UINT16)(IdtSize - 1);
+
+ //
+ // Set the value at the top of the CPU stack to the CPU Index
+ //
+ *(UINTN*)(UINTN)CpuSmiStack = CpuIndex;
+
+ //
+ // Copy template to CPU specific SMI handler location
+ //
+ CopyMem (
+ (VOID*)((UINTN)SmBase + SMM_HANDLER_OFFSET),
+ (VOID*)gcSmiHandlerTemplate,
+ gcSmiHandlerSize
+ );
+}
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SyncTimer.c b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SyncTimer.c
new file mode 100644
index 00000000..1d79a50c
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/SyncTimer.c
@@ -0,0 +1,110 @@
+/** @file
+SMM Timer feature support
+
+Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+UINT64 mTimeoutTicker = 0;
+//
+// Number of counts in a roll-over cycle of the performance counter.
+//
+UINT64 mCycle = 0;
+//
+// Flag to indicate the performance counter is count-up or count-down.
+//
+BOOLEAN mCountDown;
+
+/**
+ Initialize Timer for SMM AP Sync.
+
+**/
+VOID
+InitializeSmmTimer (
+ VOID
+ )
+{
+ UINT64 TimerFrequency;
+ UINT64 Start;
+ UINT64 End;
+
+ TimerFrequency = GetPerformanceCounterProperties (&Start, &End);
+ mTimeoutTicker = DivU64x32 (
+ MultU64x64(TimerFrequency, PcdGet64 (PcdCpuSmmApSyncTimeout)),
+ 1000 * 1000
+ );
+ if (End < Start) {
+ mCountDown = TRUE;
+ mCycle = Start - End;
+ } else {
+ mCountDown = FALSE;
+ mCycle = End - Start;
+ }
+}
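+
+//
+// Worked example (illustrative, assuming a 24 MHz performance counter and
+// PcdCpuSmmApSyncTimeout = 1000 microseconds):
+//
+//   mTimeoutTicker = 24,000,000 * 1000 / 1,000,000 = 24,000 ticks
+//
+// i.e. the AP sync wait is declared timed out once the counter has moved
+// 24,000 ticks away from the value captured by StartSyncTimer ().
+//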
+
+/**
+ Start Timer for SMM AP Sync.
+
+**/
+UINT64
+EFIAPI
+StartSyncTimer (
+ VOID
+ )
+{
+ return GetPerformanceCounter ();
+}
+
+
+/**
+ Check if the SMM AP Sync timer has timed out.
+
+ @param Timer The timer value captured when the wait began.
+
+**/
+BOOLEAN
+EFIAPI
+IsSyncTimerTimeout (
+ IN UINT64 Timer
+ )
+{
+ UINT64 CurrentTimer;
+ UINT64 Delta;
+
+ CurrentTimer = GetPerformanceCounter ();
+ //
+ // CurrentTimer may equal Timer when the performance counter runs slowly
+ // relative to the CPU; in that case we assume that no roll-over has
+ // occurred.
+ //
+ if (mCountDown) {
+ //
+ // The performance counter counts down. Check for roll over condition.
+ //
+ if (CurrentTimer <= Timer) {
+ Delta = Timer - CurrentTimer;
+ } else {
+ //
+ // Handle one roll-over.
+ //
+ Delta = mCycle - (CurrentTimer - Timer) + 1;
+ }
+ } else {
+ //
+ // The performance counter counts up. Check for roll over condition.
+ //
+ if (CurrentTimer >= Timer) {
+ Delta = CurrentTimer - Timer;
+ } else {
+ //
+ // Handle one roll-over.
+ //
+ Delta = mCycle - (Timer - CurrentTimer) + 1;
+ }
+ }
+
+ return (BOOLEAN) (Delta >= mTimeoutTicker);
+}
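+
+//
+// Illustrative usage sketch only, not part of the upstream file: a typical
+// wait loop pairing StartSyncTimer () with IsSyncTimerTimeout (). The
+// ApHasCheckedIn () predicate is a hypothetical placeholder.
+//
+#if 0
+BOOLEAN
+ExampleWaitForAp (
+  VOID
+  )
+{
+  UINT64  Timer;
+
+  Timer = StartSyncTimer ();
+  while (!IsSyncTimerTimeout (Timer)) {
+    if (ApHasCheckedIn ()) {    // hypothetical check-in predicate
+      return TRUE;              // AP arrived within the timeout
+    }
+    CpuPause ();
+  }
+  return FALSE;                 // timed out waiting for the AP
+}
+#endif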
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Cet.nasm b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Cet.nasm
new file mode 100644
index 00000000..99172364
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Cet.nasm
@@ -0,0 +1,34 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2019, Intel Corporation. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+;-------------------------------------------------------------------------------
+
+%include "Nasm.inc"
+
+DEFAULT REL
+SECTION .text
+
+global ASM_PFX(DisableCet)
+ASM_PFX(DisableCet):
+
+ ; Skip the pushed data for call
+ mov rax, 1
+ INCSSP_RAX
+
+ mov rax, cr4
+ btr eax, 23 ; clear CET
+ mov cr4, rax
+ ret
+
+global ASM_PFX(EnableCet)
+ASM_PFX(EnableCet):
+
+ mov rax, cr4
+ bts eax, 23 ; set CET
+ mov cr4, rax
+
+ ; return via jmp so the shadow-stack check performed by ret is skipped
+ pop rax
+ jmp rax
+
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/MpFuncs.nasm b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/MpFuncs.nasm
new file mode 100644
index 00000000..3eff0772
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/MpFuncs.nasm
@@ -0,0 +1,189 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+; Module Name:
+;
+; MpFuncs.nasm
+;
+; Abstract:
+;
+; This is the assembly code for Multi-processor S3 support
+;
+;-------------------------------------------------------------------------------
+
+%define VacantFlag 0x0
+%define NotVacantFlag 0xff
+
+%define LockLocation RendezvousFunnelProcEnd - RendezvousFunnelProcStart
+%define StackStartAddressLocation LockLocation + 0x8
+%define StackSizeLocation LockLocation + 0x10
+%define CProcedureLocation LockLocation + 0x18
+%define GdtrLocation LockLocation + 0x20
+%define IdtrLocation LockLocation + 0x2A
+%define BufferStartLocation LockLocation + 0x34
+%define Cr3OffsetLocation LockLocation + 0x38
+%define InitializeFloatingPointUnitsAddress LockLocation + 0x3C
+
+;-------------------------------------------------------------------------------------
+;RendezvousFunnelProc procedure follows. All APs execute this procedure, which
+;serializes them through an INIT sequence. Note that the APs arrive here very
+;raw, i.e. in real mode with no stack.
+;ALSO, THE APs INITIALLY EXECUTE THIS PROCEDURE IN 16-BIT MODE, HENCE ITS
+;FIRST PART IS ASSEMBLED AS 16-BIT CODE.
+;-------------------------------------------------------------------------------------
+;RendezvousFunnelProc (&WakeUpBuffer,MemAddress);
+
+;text SEGMENT
+DEFAULT REL
+SECTION .text
+
+BITS 16
+global ASM_PFX(RendezvousFunnelProc)
+ASM_PFX(RendezvousFunnelProc):
+RendezvousFunnelProcStart:
+
+; At this point CS = 0x(vv00) and IP = 0x0.
+
+ mov ax, cs
+ mov ds, ax
+ mov es, ax
+ mov ss, ax
+ xor ax, ax
+ mov fs, ax
+ mov gs, ax
+
+flat32Start:
+
+ mov si, BufferStartLocation
+ mov edx,dword [si] ; EDX holds the start address of the wakeup buffer
+
+ mov si, Cr3OffsetLocation
+ mov ecx,dword [si] ; ECX holds the value of CR3
+
+ mov si, GdtrLocation
+o32 lgdt [cs:si]
+
+ mov si, IdtrLocation
+o32 lidt [cs:si]
+
+ xor ax, ax
+ mov ds, ax
+
+ mov eax, cr0 ; Get control register 0
+ or eax, 0x000000001 ; Set PE bit (bit #0)
+ mov cr0, eax
+
+FLAT32_JUMP:
+
+a32 jmp dword 0x20:0x0
+
+BITS 32
+PMODE_ENTRY: ; protected mode entry point
+
+ mov ax, 0x18
+o16 mov ds, ax
+o16 mov es, ax
+o16 mov fs, ax
+o16 mov gs, ax
+o16 mov ss, ax ; Flat mode setup.
+
+ mov eax, cr4
+ bts eax, 5
+ mov cr4, eax
+
+ mov cr3, ecx
+
+ mov esi, edx ; Save wakeup buffer address
+
+ mov ecx, 0xc0000080 ; EFER MSR number.
+ rdmsr ; Read EFER.
+ bts eax, 8 ; Set LME=1.
+ wrmsr ; Write EFER.
+
+ mov eax, cr0 ; Read CR0.
+ bts eax, 31 ; Set PG=1.
+ mov cr0, eax ; Write CR0.
+
+LONG_JUMP:
+
+a16 jmp dword 0x38:0x0
+
+BITS 64
+LongModeStart:
+
+ mov ax, 0x30
+o16 mov ds, ax
+o16 mov es, ax
+o16 mov ss, ax
+
+ mov edi, esi
+ add edi, LockLocation
+ mov al, NotVacantFlag
+TestLock:
+ xchg byte [edi], al
+ cmp al, NotVacantFlag
+ jz TestLock
+
+ProgramStack:
+
+ mov edi, esi
+ add edi, StackSizeLocation
+ mov rax, qword [edi]
+ mov edi, esi
+ add edi, StackStartAddressLocation
+ add rax, qword [edi]
+ mov rsp, rax
+ mov qword [edi], rax
+
+Releaselock:
+
+ mov al, VacantFlag
+ mov edi, esi
+ add edi, LockLocation
+ xchg byte [edi], al
+
+ ;
+ ; Call assembly function to initialize FPU.
+ ;
+ mov rax, qword [esi + InitializeFloatingPointUnitsAddress]
+ sub rsp, 0x20
+ call rax
+ add rsp, 0x20
+
+ ;
+ ; Call C Function
+ ;
+ mov edi, esi
+ add edi, CProcedureLocation
+ mov rax, qword [edi]
+
+ test rax, rax
+ jz GoToSleep
+
+ sub rsp, 0x20
+ call rax
+ add rsp, 0x20
+
+GoToSleep:
+ cli
+ hlt
+ jmp $-2
+
+RendezvousFunnelProcEnd:
+
+;-------------------------------------------------------------------------------------
+; AsmGetAddressMap (&AddressMap);
+;-------------------------------------------------------------------------------------
+; Fills in the caller-provided address map with the offsets of the mode entry
+; points within RendezvousFunnelProc.
+global ASM_PFX(AsmGetAddressMap)
+ASM_PFX(AsmGetAddressMap):
+ lea rax, [RendezvousFunnelProcStart]
+ mov qword [rcx], rax
+ mov qword [rcx+0x8], PMODE_ENTRY - RendezvousFunnelProcStart
+ mov qword [rcx+0x10], FLAT32_JUMP - RendezvousFunnelProcStart
+ mov qword [rcx+0x18], RendezvousFunnelProcEnd - RendezvousFunnelProcStart
+ mov qword [rcx+0x20], LongModeStart - RendezvousFunnelProcStart
+ mov qword [rcx+0x28], LONG_JUMP - RendezvousFunnelProcStart
+ ret
+
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
new file mode 100644
index 00000000..a9a995db
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
@@ -0,0 +1,1325 @@
+/** @file
+Page Fault (#PF) handler for X64 processors
+
+Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
+
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+#define PAGE_TABLE_PAGES 8
+#define ACC_MAX_BIT BIT3
+
+extern UINTN mSmmShadowStackSize;
+
+LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
+BOOLEAN m1GPageTableSupport = FALSE;
+BOOLEAN mCpuSmmRestrictedMemoryAccess;
+BOOLEAN m5LevelPagingNeeded;
+X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingNeeded;
+
+/**
+ Disable CET.
+**/
+VOID
+EFIAPI
+DisableCet (
+ VOID
+ );
+
+/**
+ Enable CET.
+**/
+VOID
+EFIAPI
+EnableCet (
+ VOID
+ );
+
+/**
+ Check whether 1-GByte pages are supported by the processor.
+
+ @retval TRUE 1-GByte pages are supported.
+ @retval FALSE 1-GByte pages are not supported.
+
+**/
+BOOLEAN
+Is1GPageSupport (
+ VOID
+ )
+{
+ UINT32 RegEax;
+ UINT32 RegEdx;
+
+ AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
+ if (RegEax >= 0x80000001) {
+ AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
+ if ((RegEdx & BIT26) != 0) {
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+/**
+ The routine returns TRUE when the CPU supports 5-level paging (CPUID[7,0].ECX.BIT[16]
+ is set) and the maximum physical address width is larger than 48 bits. Because
+ 4-level paging can already address physical memory up to 2^48 - 1, there is no
+ need to enable 5-level paging when the maximum physical address width is <= 48.
+
+ @retval TRUE 5-level paging enabling is needed.
+ @retval FALSE 5-level paging enabling is not needed.
+**/
+BOOLEAN
+Is5LevelPagingNeeded (
+ VOID
+ )
+{
+ CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize;
+ CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX ExtFeatureEcx;
+ UINT32 MaxExtendedFunctionId;
+
+ AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);
+ if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {
+ AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
+ } else {
+ VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
+ }
+ AsmCpuidEx (
+ CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
+ CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
+ NULL, NULL, &ExtFeatureEcx.Uint32, NULL
+ );
+ DEBUG ((
+ DEBUG_INFO, "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
+ VirPhyAddressSize.Bits.PhysicalAddressBits, ExtFeatureEcx.Bits.FiveLevelPage
+ ));
+
+ if (VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) {
+ ASSERT (ExtFeatureEcx.Bits.FiveLevelPage == 1);
+ return TRUE;
+ } else {
+ return FALSE;
+ }
+}
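+
+//
+// Worked arithmetic (illustrative): each paging level decodes 9 linear-address
+// bits and the 4-KByte page offset covers 12, so 4-level paging decodes
+// 4 * 9 + 12 = 48 bits. Since SMM identity-maps memory, physical addresses
+// above 2^48 can only be reached with 5-level paging.
+//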
+
+/**
+ Get page table base address and the depth of the page table.
+
+ @param[out] Base Page table base address.
+ @param[out] FiveLevels TRUE means 5 level paging. FALSE means 4 level paging.
+**/
+VOID
+GetPageTable (
+ OUT UINTN *Base,
+ OUT BOOLEAN *FiveLevels OPTIONAL
+ )
+{
+ IA32_CR4 Cr4;
+
+ if (mInternalCr3 == 0) {
+ *Base = AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64;
+ if (FiveLevels != NULL) {
+ Cr4.UintN = AsmReadCr4 ();
+ *FiveLevels = (BOOLEAN)(Cr4.Bits.LA57 == 1);
+ }
+ return;
+ }
+
+ *Base = mInternalCr3;
+ if (FiveLevels != NULL) {
+ *FiveLevels = m5LevelPagingNeeded;
+ }
+}
+
+/**
+ Set sub-entries number in entry.
+
+ @param[in, out] Entry Pointer to entry
+ @param[in] SubEntryNum Sub-entries number based on 0:
+ 0 means there is 1 sub-entry under this entry
+ 0x1ff means there are 512 sub-entries under this entry
+
+**/
+VOID
+SetSubEntriesNum (
+ IN OUT UINT64 *Entry,
+ IN UINT64 SubEntryNum
+ )
+{
+ //
+ // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
+ //
+ *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
+}
+
+/**
+ Return sub-entries number in entry.
+
+ @param[in] Entry Pointer to entry
+
+ @return Sub-entries number based on 0:
+ 0 means there is 1 sub-entry under this entry
+ 0x1ff means there are 512 sub-entries under this entry
+**/
+UINT64
+GetSubEntriesNum (
+ IN UINT64 *Entry
+ )
+{
+ //
+ // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
+ //
+ return BitFieldRead64 (*Entry, 52, 60);
+}
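+
+//
+// Illustrative sketch only, not part of the upstream file: the sub-entries
+// count round-trips through the reserved bits 52..60 without disturbing the
+// address or attribute bits of the paging entry.
+//
+#if 0
+VOID
+ExampleSubEntriesRoundTrip (
+  VOID
+  )
+{
+  UINT64  Entry;
+
+  Entry = 0x0000000012345000ULL | IA32_PG_P;  // sample entry: address + Present
+  SetSubEntriesNum (&Entry, 0x1FF);           // record 512 sub-entries (0-based)
+  ASSERT (GetSubEntriesNum (&Entry) == 0x1FF);
+  ASSERT ((Entry & IA32_PG_P) != 0);          // attribute bits untouched
+}
+#endif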
+
+/**
+ Calculate the maximum supported physical address bits.
+
+ @return The number of physical address bits supported.
+**/
+UINT8
+CalculateMaximumSupportAddress (
+ VOID
+ )
+{
+ UINT32 RegEax;
+ UINT8 PhysicalAddressBits;
+ VOID *Hob;
+
+ //
+ // Get physical address bits supported.
+ //
+ Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
+ if (Hob != NULL) {
+ PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
+ } else {
+ AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
+ if (RegEax >= 0x80000008) {
+ AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
+ PhysicalAddressBits = (UINT8) RegEax;
+ } else {
+ PhysicalAddressBits = 36;
+ }
+ }
+ return PhysicalAddressBits;
+}
+
+/**
+ Set static page table.
+
+ @param[in] PageTable Address of page table.
+ @param[in] PhysicalAddressBits The maximum physical address bits supported.
+**/
+VOID
+SetStaticPageTable (
+ IN UINTN PageTable,
+ IN UINT8 PhysicalAddressBits
+ )
+{
+ UINT64 PageAddress;
+ UINTN NumberOfPml5EntriesNeeded;
+ UINTN NumberOfPml4EntriesNeeded;
+ UINTN NumberOfPdpEntriesNeeded;
+ UINTN IndexOfPml5Entries;
+ UINTN IndexOfPml4Entries;
+ UINTN IndexOfPdpEntries;
+ UINTN IndexOfPageDirectoryEntries;
+ UINT64 *PageMapLevel5Entry;
+ UINT64 *PageMapLevel4Entry;
+ UINT64 *PageMap;
+ UINT64 *PageDirectoryPointerEntry;
+ UINT64 *PageDirectory1GEntry;
+ UINT64 *PageDirectoryEntry;
+
+ //
+ // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
+ // when 5-Level Paging is disabled.
+ //
+ ASSERT (PhysicalAddressBits <= 52);
+ if (!m5LevelPagingNeeded && PhysicalAddressBits > 48) {
+ PhysicalAddressBits = 48;
+ }
+
+ NumberOfPml5EntriesNeeded = 1;
+ if (PhysicalAddressBits > 48) {
+ NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, PhysicalAddressBits - 48);
+ PhysicalAddressBits = 48;
+ }
+
+ NumberOfPml4EntriesNeeded = 1;
+ if (PhysicalAddressBits > 39) {
+ NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, PhysicalAddressBits - 39);
+ PhysicalAddressBits = 39;
+ }
+
+ NumberOfPdpEntriesNeeded = 1;
+ ASSERT (PhysicalAddressBits > 30);
+ NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, PhysicalAddressBits - 30);
+
+ //
+ // By architecture only one PageMapLevel4 exists - so let's allocate storage for it.
+ //
+ PageMap = (VOID *) PageTable;
+
+ PageMapLevel4Entry = PageMap;
+ PageMapLevel5Entry = NULL;
+ if (m5LevelPagingNeeded) {
+ //
+ // By architecture only one PageMapLevel5 exists - so let's allocate storage for it.
+ //
+ PageMapLevel5Entry = PageMap;
+ }
+ PageAddress = 0;
+
+ for ( IndexOfPml5Entries = 0
+ ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
+ ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
+ //
+ // Each PML5 entry points to a page of PML4 entries.
+ // So let's allocate space for them and fill them in the IndexOfPml4Entries loop.
+ // When 5-Level Paging is disabled, the allocation below happens only once.
+ //
+ if (m5LevelPagingNeeded) {
+ PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
+ if (PageMapLevel4Entry == NULL) {
+ PageMapLevel4Entry = AllocatePageTableMemory (1);
+ ASSERT(PageMapLevel4Entry != NULL);
+ ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE(1));
+
+ *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ }
+ }
+
+ for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
+ //
+ // Each PML4 entry points to a page of Page Directory Pointer entries.
+ //
+ PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
+ if (PageDirectoryPointerEntry == NULL) {
+ PageDirectoryPointerEntry = AllocatePageTableMemory (1);
+ ASSERT(PageDirectoryPointerEntry != NULL);
+ ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));
+
+ *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ }
+
+ if (m1GPageTableSupport) {
+ PageDirectory1GEntry = PageDirectoryPointerEntry;
+ for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
+ if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
+ //
+ // Skip the < 4G entries
+ //
+ continue;
+ }
+ //
+ // Fill in the Page Directory entries
+ //
+ *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
+ }
+ } else {
+ PageAddress = BASE_4GB;
+ for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
+ if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
+ //
+ // Skip the < 4G entries
+ //
+ continue;
+ }
+ //
+ // Each Page Directory Pointer entry points to a page of Page Directory entries.
+ // So allocate space for them and fill them in the IndexOfPageDirectoryEntries loop.
+ //
+ PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
+ if (PageDirectoryEntry == NULL) {
+ PageDirectoryEntry = AllocatePageTableMemory (1);
+ ASSERT(PageDirectoryEntry != NULL);
+ ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));
+
+ //
+ // Fill in the Page Directory Pointer entry
+ //
+ *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ }
+
+ for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
+ //
+ // Fill in the Page Directory entries
+ //
+ *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
+ }
+ }
+ }
+ }
+ }
+}
+
+/**
+ Create PageTable for SMM use.
+
+ @return The address of PML4 (to set CR3).
+
+**/
+UINT32
+SmmInitPageTable (
+ VOID
+ )
+{
+ EFI_PHYSICAL_ADDRESS Pages;
+ UINT64 *PTEntry;
+ LIST_ENTRY *FreePage;
+ UINTN Index;
+ UINTN PageFaultHandlerHookAddress;
+ IA32_IDT_GATE_DESCRIPTOR *IdtEntry;
+ EFI_STATUS Status;
+ UINT64 *Pml4Entry;
+ UINT64 *Pml5Entry;
+
+ //
+ // Initialize spin lock
+ //
+ InitializeSpinLock (mPFLock);
+
+ mCpuSmmRestrictedMemoryAccess = PcdGetBool (PcdCpuSmmRestrictedMemoryAccess);
+ m1GPageTableSupport = Is1GPageSupport ();
+ m5LevelPagingNeeded = Is5LevelPagingNeeded ();
+ mPhysicalAddressBits = CalculateMaximumSupportAddress ();
+ PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);
+ DEBUG ((DEBUG_INFO, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded));
+ DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
+ DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));
+ DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));
+ //
+ // Generate PAE page table for the first 4GB memory space
+ //
+ Pages = Gen4GPageTable (FALSE);
+
+ //
+ // Set IA32_PG_PMNT bit to mask this entry
+ //
+ PTEntry = (UINT64*)(UINTN)Pages;
+ for (Index = 0; Index < 4; Index++) {
+ PTEntry[Index] |= IA32_PG_PMNT;
+ }
+
+ //
+ // Fill Page-Table-Level4 (PML4) entry
+ //
+ Pml4Entry = (UINT64*)AllocatePageTableMemory (1);
+ ASSERT (Pml4Entry != NULL);
+ *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));
+
+ //
+ // Set sub-entries number
+ //
+ SetSubEntriesNum (Pml4Entry, 3);
+ PTEntry = Pml4Entry;
+
+ if (m5LevelPagingNeeded) {
+ //
+ // Fill PML5 entry
+ //
+ Pml5Entry = (UINT64*)AllocatePageTableMemory (1);
+ ASSERT (Pml5Entry != NULL);
+ *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
+ //
+ // Set sub-entries number
+ //
+ SetSubEntriesNum (Pml5Entry, 1);
+ PTEntry = Pml5Entry;
+ }
+
+ if (mCpuSmmRestrictedMemoryAccess) {
+ //
+ // When access to non-SMRAM memory is restricted, create page table
+ // that covers all memory space.
+ //
+ SetStaticPageTable ((UINTN)PTEntry, mPhysicalAddressBits);
+ } else {
+ //
+ // Add pages to page pool
+ //
+ FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
+ ASSERT (FreePage != NULL);
+ for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
+ InsertTailList (&mPagePool, FreePage);
+ FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
+ }
+ }
+
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
+ HEAP_GUARD_NONSTOP_MODE ||
+ NULL_DETECTION_NONSTOP_MODE) {
+ //
+ // Set own Page Fault entry instead of the default one, because SMM Profile
+ // feature depends on IRET instruction to do Single Step
+ //
+ PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
+ IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
+ IdtEntry += EXCEPT_IA32_PAGE_FAULT;
+ IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;
+ IdtEntry->Bits.Reserved_0 = 0;
+ IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;
+ IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);
+ IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
+ IdtEntry->Bits.Reserved_1 = 0;
+ } else {
+ //
+ // Register Smm Page Fault Handler
+ //
+ Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
+ ASSERT_EFI_ERROR (Status);
+ }
+
+ //
+ // Additional SMM IDT initialization for SMM stack guard
+ //
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ InitializeIDTSmmStackGuard ();
+ }
+
+ //
+ // Return the address of PML4/PML5 (to set CR3)
+ //
+ return (UINT32)(UINTN)PTEntry;
+}
+
+/**
+ Set access record in entry.
+
+ @param[in, out] Entry Pointer to entry
+ @param[in] Acc Access record value
+
+**/
+VOID
+SetAccNum (
+ IN OUT UINT64 *Entry,
+ IN UINT64 Acc
+ )
+{
+ //
+ // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
+ //
+ *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
+}
+
+/**
+ Return access record in entry.
+
+ @param[in] Entry Pointer to entry
+
+ @return Access record value.
+
+**/
+UINT64
+GetAccNum (
+ IN UINT64 *Entry
+ )
+{
+ //
+ // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
+ //
+ return BitFieldRead64 (*Entry, 9, 11);
+}
+
+/**
+ Return and update the access record in entry.
+
+ @param[in, out] Entry Pointer to entry
+
+ @return Access record value.
+
+**/
+UINT64
+GetAndUpdateAccNum (
+ IN OUT UINT64 *Entry
+ )
+{
+ UINT64 Acc;
+
+ Acc = GetAccNum (Entry);
+ if ((*Entry & IA32_PG_A) != 0) {
+ //
+ // If this entry has been accessed, clear the access flag in the entry and reset
+ // the access record to the initial value 7; ACC_MAX_BIT is added to the return
+ // value to rank it above entries that were not accessed
+ //
+ *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
+ SetAccNum (Entry, 0x7);
+ return (0x7 + ACC_MAX_BIT);
+ } else {
+ if (Acc != 0) {
+ //
+ // If the access record is not already the smallest value 0, subtract 1 and update the field
+ //
+ SetAccNum (Entry, Acc - 1);
+ }
+ }
+ return Acc;
+}
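+
+//
+// Worked example (illustrative): for a freshly referenced entry,
+// GetAndUpdateAccNum () returns 7 + ACC_MAX_BIT (= 15) once, clears IA32_PG_A,
+// and resets the record to 7; each later call without an intervening access
+// then returns 7, 6, 5, ... down to 0. Recently used entries therefore always
+// rank above entries that have been idle across several reclaim scans.
+//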
+
+/**
+ Reclaim free pages for PageFault handler.
+
+ Search the whole page table tree to find the leaf entry that has the smallest
+ access record value. Insert the page pointed to by this leaf entry into the
+ page pool, and check whether its upper-level entries need to be inserted into
+ the page pool as well.
+
+**/
+VOID
+ReclaimPages (
+ VOID
+ )
+{
+ UINT64 Pml5Entry;
+ UINT64 *Pml5;
+ UINT64 *Pml4;
+ UINT64 *Pdpt;
+ UINT64 *Pdt;
+ UINTN Pml5Index;
+ UINTN Pml4Index;
+ UINTN PdptIndex;
+ UINTN PdtIndex;
+ UINTN MinPml5;
+ UINTN MinPml4;
+ UINTN MinPdpt;
+ UINTN MinPdt;
+ UINT64 MinAcc;
+ UINT64 Acc;
+ UINT64 SubEntriesNum;
+ BOOLEAN PML4EIgnore;
+ BOOLEAN PDPTEIgnore;
+ UINT64 *ReleasePageAddress;
+ IA32_CR4 Cr4;
+ BOOLEAN Enable5LevelPaging;
+ UINT64 PFAddress;
+ UINT64 PFAddressPml5Index;
+ UINT64 PFAddressPml4Index;
+ UINT64 PFAddressPdptIndex;
+ UINT64 PFAddressPdtIndex;
+
+ Pml4 = NULL;
+ Pdpt = NULL;
+ Pdt = NULL;
+ MinAcc = (UINT64)-1;
+ MinPml4 = (UINTN)-1;
+ MinPml5 = (UINTN)-1;
+ MinPdpt = (UINTN)-1;
+ MinPdt = (UINTN)-1;
+ Acc = 0;
+ ReleasePageAddress = 0;
+ PFAddress = AsmReadCr2 ();
+ PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);
+ PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);
+ PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);
+ PFAddressPdtIndex = BitFieldRead64 (PFAddress, 21, 21 + 8);
+
+ Cr4.UintN = AsmReadCr4 ();
+ Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
+ Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
+
+ if (!Enable5LevelPaging) {
+ //
+ // Create one fake PML5 entry for 4-Level Paging
+ // so that the page table parsing logic only handles 5-Level page structure.
+ //
+ Pml5Entry = (UINTN) Pml5 | IA32_PG_P;
+ Pml5 = &Pml5Entry;
+ }
+
+ //
+ // First, find the leaf entry that has the smallest access record value
+ //
+ for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
+ if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
+ //
+ // If the PML5 entry is not present or is masked, skip it
+ //
+ continue;
+ }
+ Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask);
+ for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
+ if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
+ //
+ // If the PML4 entry is not present or is masked, skip it
+ //
+ continue;
+ }
+ Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
+ PML4EIgnore = FALSE;
+ for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
+ if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
+ //
+ // If the PDPT entry is not present or is masked, skip it
+ //
+ if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
+ //
+ // If the PDPT entry is masked, skip checking the PML4 entry
+ //
+ PML4EIgnore = TRUE;
+ }
+ continue;
+ }
+ if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
+ //
+ // This entry does not map a 1-GByte page, so it points to a Page
+ // Directory; do not consider the PML4 entry for reclamation any more
+ //
+ PML4EIgnore = TRUE;
+ Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
+ PDPTEIgnore = FALSE;
+ for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
+ if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
+ //
+ // If the PD entry is not present or is masked, skip it
+ //
+ if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
+ //
+ // If the PD entry is masked, we will not check the PDPT entry any more
+ //
+ PDPTEIgnore = TRUE;
+ }
+ continue;
+ }
+ if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
+ //
+ // This entry does not map a 2-MByte page, so it points to a Page Table;
+ // look for the entry with the smallest access record value
+ //
+ PDPTEIgnore = TRUE;
+ if (PdtIndex != PFAddressPdtIndex || PdptIndex != PFAddressPdptIndex ||
+ Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
+ Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
+ if (Acc < MinAcc) {
+ //
+ // If the PD entry has the smallest access record value,
+ // save the Page address to be released
+ //
+ MinAcc = Acc;
+ MinPml5 = Pml5Index;
+ MinPml4 = Pml4Index;
+ MinPdpt = PdptIndex;
+ MinPdt = PdtIndex;
+ ReleasePageAddress = Pdt + PdtIndex;
+ }
+ }
+ }
+ }
+ if (!PDPTEIgnore) {
+ //
+ // If this PDPT entry has no PD entries pointing to 4-KByte page tables,
+ // it only holds entries that map 2-MByte pages
+ //
+ if (PdptIndex != PFAddressPdptIndex || Pml4Index != PFAddressPml4Index ||
+ Pml5Index != PFAddressPml5Index) {
+ Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
+ if (Acc < MinAcc) {
+ //
+ // If the PDPT entry has the smallest access record value,
+ // save the Page address to be released
+ //
+ MinAcc = Acc;
+ MinPml5 = Pml5Index;
+ MinPml4 = Pml4Index;
+ MinPdpt = PdptIndex;
+ MinPdt = (UINTN)-1;
+ ReleasePageAddress = Pdpt + PdptIndex;
+ }
+ }
+ }
+ }
+ }
+ if (!PML4EIgnore) {
+ //
+ // If this PML4 entry has no PDPT entries pointing to Page Directories,
+ // it only holds entries that map 1-GByte pages
+ //
+ if (Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
+ Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
+ if (Acc < MinAcc) {
+ //
+ // If the PML4 entry has the smallest access record value,
+ // save the Page address to be released
+ //
+ MinAcc = Acc;
+ MinPml5 = Pml5Index;
+ MinPml4 = Pml4Index;
+ MinPdpt = (UINTN)-1;
+ MinPdt = (UINTN)-1;
+ ReleasePageAddress = Pml4 + Pml4Index;
+ }
+ }
+ }
+ }
+ }
+ //
+ // Make sure one PML4/PDPT/PD entry is selected
+ //
+ ASSERT (MinAcc != (UINT64)-1);
+
+ //
+ // Secondly, insert the page pointed to by this entry into the page pool and clear the entry
+ //
+ InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
+ *ReleasePageAddress = 0;
+
+ //
+ // Lastly, check whether this entry's upper-level entries also need to be
+ // inserted into the page pool
+ //
+ while (TRUE) {
+ if (MinPdt != (UINTN)-1) {
+ //
+ // If 4 KByte Page Table is released, check the PDPT entry
+ //
+ Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] & gPhyMask);
+ Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
+ SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
+ if (SubEntriesNum == 0 &&
+ (MinPdpt != PFAddressPdptIndex || MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
+ //
+ // Release the empty Page Directory table if it no longer holds any
+ // 4-KByte Page Table entries, and clear the corresponding PDPT entry
+ //
+ InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
+ Pdpt[MinPdpt] = 0;
+ //
+ // Go on checking the PML4 table
+ //
+ MinPdt = (UINTN)-1;
+ continue;
+ }
+ //
+ // Update the sub-entries field in the PDPT entry and exit
+ //
+ SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);
+ break;
+ }
+ if (MinPdpt != (UINTN)-1) {
+ //
+ // A 2-MByte page table or a Page Directory table was released; check the PML4 entry
+ //
+ SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
+ if (SubEntriesNum == 0 && (MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
+ //
+ // Release the empty page table if it no longer holds any 1-GByte page
+ // entries, and clear the corresponding PML4 entry
+ //
+ InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
+ Pml4[MinPml4] = 0;
+ MinPdpt = (UINTN)-1;
+ continue;
+ }
+ //
+ // Update the sub-entries field in the PML4 entry and exit
+ //
+ SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);
+ break;
+ }
+ //
+ // A PML4-level entry was already released above; nothing more to do
+ //
+ break;
+ }
+}
+
+/**
+ Allocate a free page for Page Fault handler use.
+
+ @return Page address.
+
+**/
+UINT64
+AllocPage (
+ VOID
+ )
+{
+ UINT64 RetVal;
+
+ if (IsListEmpty (&mPagePool)) {
+ //
+ // If the page pool is empty, reclaim used pages and refill the pool
+ //
+ ReclaimPages ();
+ }
+
+ //
+ // Get one free page and remove it from page pool
+ //
+ RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
+ RemoveEntryList (mPagePool.ForwardLink);
+ //
+ // Clean this page and return
+ //
+ ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
+ return RetVal;
+}
+
+/**
+ Page Fault handler for SMM use.
+
+**/
+VOID
+SmiDefaultPFHandler (
+ VOID
+ )
+{
+ UINT64 *PageTable;
+ UINT64 *PageTableTop;
+ UINT64 PFAddress;
+ UINTN StartBit;
+ UINTN EndBit;
+ UINT64 PTIndex;
+ UINTN Index;
+ SMM_PAGE_SIZE_TYPE PageSize;
+ UINTN NumOfPages;
+ UINTN PageAttribute;
+ EFI_STATUS Status;
+ UINT64 *UpperEntry;
+ BOOLEAN Enable5LevelPaging;
+ IA32_CR4 Cr4;
+
+ //
+ // Set default SMM page attribute
+ //
+ PageSize = SmmPageSize2M;
+ NumOfPages = 1;
+ PageAttribute = 0;
+
+ EndBit = 0;
+ PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask);
+ PFAddress = AsmReadCr2 ();
+
+ Cr4.UintN = AsmReadCr4 ();
+ Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);
+
+ Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
+ //
+ // If the platform does not provide page table attributes, fall back to the default SMM page attribute
+ //
+ if (Status != EFI_SUCCESS) {
+ PageSize = SmmPageSize2M;
+ NumOfPages = 1;
+ PageAttribute = 0;
+ }
+ if (PageSize >= MaxSmmPageSizeType) {
+ PageSize = SmmPageSize2M;
+ }
+ if (NumOfPages > 512) {
+ NumOfPages = 512;
+ }
+
+ switch (PageSize) {
+ case SmmPageSize4K:
+ //
+ // BIT12 to BIT20 is Page Table index
+ //
+ EndBit = 12;
+ break;
+ case SmmPageSize2M:
+ //
+ // BIT21 to BIT29 is Page Directory index
+ //
+ EndBit = 21;
+ PageAttribute |= (UINTN)IA32_PG_PS;
+ break;
+ case SmmPageSize1G:
+ if (!m1GPageTableSupport) {
+ DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
+ ASSERT (FALSE);
+ }
+ //
+ // BIT30 to BIT38 is Page Directory Pointer Table index
+ //
+ EndBit = 30;
+ PageAttribute |= (UINTN)IA32_PG_PS;
+ break;
+ default:
+ ASSERT (FALSE);
+ }
+
+ //
+ // If execute-disable is enabled, set NX bit
+ //
+ if (mXdEnabled) {
+ PageAttribute |= IA32_PG_NX;
+ }
+
+ for (Index = 0; Index < NumOfPages; Index++) {
+ PageTable = PageTableTop;
+ UpperEntry = NULL;
+ for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
+ PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
+ if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
+ //
+ // If the entry is not present, allocate one page from page pool for it
+ //
+ PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ } else {
+ //
+ // Save the upper entry address
+ //
+ UpperEntry = PageTable + PTIndex;
+ }
+ //
+ // BIT9 to BIT11 of the entry are used to save the access record;
+ // the initial value is 7
+ //
+ PageTable[PTIndex] |= (UINT64)IA32_PG_A;
+ SetAccNum (PageTable + PTIndex, 7);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
+ }
+
+ PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
+ if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
+ //
+ // Check whether the entry already exists; this can occur when page entries
+ // of different sizes are created under the same upper-level entry
+ //
+ DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
+ DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
+ ASSERT (FALSE);
+ }
+ //
+ // Fill the new entry
+ //
+ PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
+ PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
+ if (UpperEntry != NULL) {
+ SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
+ }
+ //
+ // Get the next page address if we need to create more page tables
+ //
+ PFAddress += (1ull << EndBit);
+ }
+}
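+
+//
+// Worked example (illustrative): for a 2-MByte mapping (EndBit = 21) of the
+// faulting address 0x12_3456_7890 under 4-level paging, the walk above
+// extracts indexes with BitFieldRead64 (PFAddress, StartBit, StartBit + 8):
+//
+//   PML4 index = bits 47:39 = 0x000
+//   PDPT index = bits 38:30 = 0x048
+//   PD   index = bits 29:21 = 0x1A2
+//
+// and the new PD entry maps PFAddress rounded down to 2 MByte, 0x12_3440_0000.
+//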
+
+/**
+ The Page Fault handler wrapper for SMM use.
+
+ @param InterruptType Defines the type of interrupt or exception that
+ occurred on the processor. This parameter is processor-architecture specific.
+ @param SystemContext A pointer to the processor context when
+ the interrupt occurred on the processor.
+**/
+VOID
+EFIAPI
+SmiPFHandler (
+ IN EFI_EXCEPTION_TYPE InterruptType,
+ IN EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ UINTN PFAddress;
+ UINTN GuardPageAddress;
+ UINTN ShadowStackGuardPageAddress;
+ UINTN CpuIndex;
+
+ ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);
+
+ AcquireSpinLock (mPFLock);
+
+ PFAddress = AsmReadCr2 ();
+
+ if (mCpuSmmRestrictedMemoryAccess && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
+ DumpCpuContext (InterruptType, SystemContext);
+ DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
+ CpuDeadLoop ();
+ goto Exit;
+ }
+
+ //
+ // If a page fault occurs in SMRAM range, it might be in a SMM stack/shadow stack guard page,
+ // or SMM page protection violation.
+ //
+ if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
+ (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
+ DumpCpuContext (InterruptType, SystemContext);
+ CpuIndex = GetCpuIndex ();
+ GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
+ ShadowStackGuardPageAddress = (mSmmStackArrayBase + mSmmStackSize + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
+ if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
+ (PFAddress >= GuardPageAddress) &&
+ (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
+ DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
+ } else if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
+ (mSmmShadowStackSize > 0) &&
+ (PFAddress >= ShadowStackGuardPageAddress) &&
+ (PFAddress < (ShadowStackGuardPageAddress + EFI_PAGE_SIZE))) {
+ DEBUG ((DEBUG_ERROR, "SMM shadow stack overflow!\n"));
+ } else {
+ if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
+ DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
+ DEBUG_CODE (
+ DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
+ );
+ } else {
+ DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
+ DEBUG_CODE (
+ DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
+ );
+ }
+
+ if (HEAP_GUARD_NONSTOP_MODE) {
+ GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
+ goto Exit;
+ }
+ }
+ CpuDeadLoop ();
+ goto Exit;
+ }
+
+ //
+ // If a page fault occurs in non-SMRAM range.
+ //
+ if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
+ (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
+ if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
+ DumpCpuContext (InterruptType, SystemContext);
+ DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
+ DEBUG_CODE (
+ DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
+ );
+ CpuDeadLoop ();
+ goto Exit;
+ }
+
+ //
+ // If NULL pointer was just accessed
+ //
+ if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
+ (PFAddress < EFI_PAGE_SIZE)) {
+ DumpCpuContext (InterruptType, SystemContext);
+ DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
+ DEBUG_CODE (
+ DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
+ );
+
+ if (NULL_DETECTION_NONSTOP_MODE) {
+ GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
+ goto Exit;
+ }
+
+ CpuDeadLoop ();
+ goto Exit;
+ }
+
+ if (mCpuSmmRestrictedMemoryAccess && IsSmmCommBufferForbiddenAddress (PFAddress)) {
+ DumpCpuContext (InterruptType, SystemContext);
+ DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
+ DEBUG_CODE (
+ DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
+ );
+ CpuDeadLoop ();
+ goto Exit;
+ }
+ }
+
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ SmmProfilePFHandler (
+ SystemContext.SystemContextX64->Rip,
+ SystemContext.SystemContextX64->ExceptionData
+ );
+ } else {
+ SmiDefaultPFHandler ();
+ }
+
+Exit:
+ ReleaseSpinLock (mPFLock);
+}
+
+/**
+ This function sets memory attribute for page table.
+**/
+VOID
+SetPageTableAttributes (
+ VOID
+ )
+{
+ UINTN Index2;
+ UINTN Index3;
+ UINTN Index4;
+ UINTN Index5;
+ UINT64 *L1PageTable;
+ UINT64 *L2PageTable;
+ UINT64 *L3PageTable;
+ UINT64 *L4PageTable;
+ UINT64 *L5PageTable;
+ UINTN PageTableBase;
+ BOOLEAN IsSplitted;
+ BOOLEAN PageTableSplitted;
+ BOOLEAN CetEnabled;
+ BOOLEAN Enable5LevelPaging;
+
+ //
+ // Don't mark page table memory as read-only if
+ // - no restriction on access to non-SMRAM memory; or
+ // - SMM heap guard feature enabled; or
+ // BIT2: SMM page guard enabled
+ // BIT3: SMM pool guard enabled
+ // - SMM profile feature enabled
+ //
+ if (!mCpuSmmRestrictedMemoryAccess ||
+ ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
+ FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ //
+ // Restriction on access to non-SMRAM memory and heap guard cannot be enabled at the same time.
+ //
+ ASSERT (!(mCpuSmmRestrictedMemoryAccess &&
+ (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));
+
+ //
+ // Restriction on access to non-SMRAM memory and SMM profile cannot be enabled at the same time.
+ //
+ ASSERT (!(mCpuSmmRestrictedMemoryAccess && FeaturePcdGet (PcdCpuSmmProfileEnable)));
+ return ;
+ }
+
+ DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));
+
+ //
+ // Disable write protection, because we need to mark the page table as read-only:
+ // we must be able to *write* page table memory in order to mark it *read-only*.
+ //
+ CetEnabled = ((AsmReadCr4() & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
+ if (CetEnabled) {
+ //
+ // CET must be disabled if WP is disabled.
+ //
+ DisableCet();
+ }
+ AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);
+
+ do {
+ DEBUG ((DEBUG_INFO, "Start...\n"));
+ PageTableSplitted = FALSE;
+ L5PageTable = NULL;
+
+ GetPageTable (&PageTableBase, &Enable5LevelPaging);
+
+ if (Enable5LevelPaging) {
+ L5PageTable = (UINT64 *)PageTableBase;
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)PageTableBase, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+ }
+
+ for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof(UINT64) : 1); Index5++) {
+ if (Enable5LevelPaging) {
+ L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
+ if (L4PageTable == NULL) {
+ continue;
+ }
+ } else {
+ L4PageTable = (UINT64 *)PageTableBase;
+ }
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+
+ for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
+ L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
+ if (L3PageTable == NULL) {
+ continue;
+ }
+
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+
+ for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
+ if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
+ // 1G
+ continue;
+ }
+ L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
+ if (L2PageTable == NULL) {
+ continue;
+ }
+
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+
+ for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
+ if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
+ // 2M
+ continue;
+ }
+ L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
+ if (L1PageTable == NULL) {
+ continue;
+ }
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+ }
+ }
+ }
+ }
+ } while (PageTableSplitted);
+
+ //
+ // Enable write protection, after page table updated.
+ //
+ AsmWriteCr0 (AsmReadCr0() | CR0_WP);
+ if (CetEnabled) {
+ //
+ // re-enable CET.
+ //
+ EnableCet();
+ }
+
+ return ;
+}
+
+/**
+ This function reads CR2 register when on-demand paging is enabled.
+
+ @param[out] Cr2 Pointer to the variable that receives the CR2 register value.
+**/
+VOID
+SaveCr2 (
+ OUT UINTN *Cr2
+ )
+{
+ if (!mCpuSmmRestrictedMemoryAccess) {
+ //
+ // On-demand paging is enabled when access to non-SMRAM is not restricted.
+ //
+ *Cr2 = AsmReadCr2 ();
+ }
+}
+
+/**
+ This function restores CR2 register when on-demand paging is enabled.
+
+ @param[in] Cr2 Value to write into CR2 register.
+**/
+VOID
+RestoreCr2 (
+ IN UINTN Cr2
+ )
+{
+ if (!mCpuSmmRestrictedMemoryAccess) {
+ //
+ // On-demand paging is enabled when access to non-SMRAM is not restricted.
+ //
+ AsmWriteCr2 (Cr2);
+ }
+}
+
+/**
+ Return whether access to non-SMRAM is restricted.
+
+ @retval TRUE Access to non-SMRAM is restricted.
+ @retval FALSE Access to non-SMRAM is not restricted.
+**/
+BOOLEAN
+IsRestrictedMemoryAccess (
+ VOID
+ )
+{
+ return mCpuSmmRestrictedMemoryAccess;
+}
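+
+//
+// Illustrative usage sketch only, not part of the upstream file: CR2 is
+// saved and restored around work that may itself take a page fault while
+// on-demand paging is active, so a nested #PF cannot clobber the address
+// the original fault was resolving. ManipulatePageTables () is a
+// hypothetical placeholder.
+//
+#if 0
+VOID
+ExampleCr2SafeSection (
+  VOID
+  )
+{
+  UINTN  Cr2;
+
+  SaveCr2 (&Cr2);            // no-op when non-SMRAM access is restricted
+  ManipulatePageTables ();   // hypothetical work that may fault
+  RestoreCr2 (Cr2);          // put the original faulting address back
+}
+#endif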
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Semaphore.c b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Semaphore.c
new file mode 100644
index 00000000..867e5b31
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Semaphore.c
@@ -0,0 +1,69 @@
+/** @file
+Semaphore mechanism to indicate to the BSP that an AP has exited SMM
+after SMBASE relocation.
+
+Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+X86_ASSEMBLY_PATCH_LABEL gPatchSmmRelocationOriginalAddressPtr32;
+X86_ASSEMBLY_PATCH_LABEL gPatchRebasedFlagAddr32;
+
+UINTN mSmmRelocationOriginalAddress;
+volatile BOOLEAN *mRebasedFlag;
+
+/**
+AP Semaphore operation in 32-bit mode while BSP runs in 64-bit mode.
+**/
+VOID
+SmmRelocationSemaphoreComplete32 (
+ VOID
+ );
+
+/**
+ Hook the return address in the SMM Save State so that the semaphore code
+ is executed immediately after the AP exits SMM, indicating to the BSP
+ that the AP has exited SMM after SMBASE relocation.
+
+ @param[in] CpuIndex The processor index.
+ @param[in] RebasedFlag A pointer to a flag that is set to TRUE
+ immediately after AP exits SMM.
+
+**/
+VOID
+SemaphoreHook (
+ IN UINTN CpuIndex,
+ IN volatile BOOLEAN *RebasedFlag
+ )
+{
+ SMRAM_SAVE_STATE_MAP *CpuState;
+ UINTN TempValue;
+
+ mRebasedFlag = RebasedFlag;
+ PatchInstructionX86 (
+ gPatchRebasedFlagAddr32,
+ (UINT32)(UINTN)mRebasedFlag,
+ 4
+ );
+
+ CpuState = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);
+ mSmmRelocationOriginalAddress = HookReturnFromSmm (
+ CpuIndex,
+ CpuState,
+ (UINT64)(UINTN)&SmmRelocationSemaphoreComplete32,
+ (UINT64)(UINTN)&SmmRelocationSemaphoreComplete
+ );
+
+ //
+ // Use temp value to fix ICC compiler warning
+ //
+ TempValue = (UINTN)&mSmmRelocationOriginalAddress;
+ PatchInstructionX86 (
+ gPatchSmmRelocationOriginalAddressPtr32,
+ (UINT32)TempValue,
+ 4
+ );
+}
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
new file mode 100644
index 00000000..334cc6c8
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
@@ -0,0 +1,396 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2016 - 2019, Intel Corporation. All rights reserved.<BR>
+; Copyright (c) 2020, AMD Incorporated. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+; Module Name:
+;
+; SmiEntry.nasm
+;
+; Abstract:
+;
+; Code template of the SMI handler for a particular processor
+;
+;-------------------------------------------------------------------------------
+
+%include "StuffRsbNasm.inc"
+%include "Nasm.inc"
+
+;
+; Variables referenced by C code
+;
+
+%define MSR_IA32_S_CET 0x6A2
+%define MSR_IA32_CET_SH_STK_EN 0x1
+%define MSR_IA32_CET_WR_SHSTK_EN 0x2
+%define MSR_IA32_CET_ENDBR_EN 0x4
+%define MSR_IA32_CET_LEG_IW_EN 0x8
+%define MSR_IA32_CET_NO_TRACK_EN 0x10
+%define MSR_IA32_CET_SUPPRESS_DIS 0x20
+%define MSR_IA32_CET_SUPPRESS 0x400
+%define MSR_IA32_CET_TRACKER 0x800
+%define MSR_IA32_PL0_SSP 0x6A4
+%define MSR_IA32_INTERRUPT_SSP_TABLE_ADDR 0x6A8
+
+%define CR4_CET 0x800000
+
+%define MSR_IA32_MISC_ENABLE 0x1A0
+%define MSR_EFER 0xc0000080
+%define MSR_EFER_XD 0x800
+
+;
+; Constants relating to PROCESSOR_SMM_DESCRIPTOR
+;
+%define DSC_OFFSET 0xfb00
+%define DSC_GDTPTR 0x30
+%define DSC_GDTSIZ 0x38
+%define DSC_CS 14
+%define DSC_DS 16
+%define DSC_SS 18
+%define DSC_OTHERSEG 20
+;
+; Constants relating to CPU State Save Area
+;
+%define SSM_DR6 0xffd0
+%define SSM_DR7 0xffc8
+
+%define PROTECT_MODE_CS 0x8
+%define PROTECT_MODE_DS 0x20
+%define LONG_MODE_CS 0x38
+%define TSS_SEGMENT 0x40
+%define GDT_SIZE 0x50
+
+extern ASM_PFX(SmiRendezvous)
+extern ASM_PFX(gSmiHandlerIdtr)
+extern ASM_PFX(CpuSmmDebugEntry)
+extern ASM_PFX(CpuSmmDebugExit)
+
+global ASM_PFX(gPatchSmbase)
+extern ASM_PFX(mXdSupported)
+global ASM_PFX(gPatchXdSupported)
+global ASM_PFX(gPatchMsrIa32MiscEnableSupported)
+global ASM_PFX(gPatchSmiStack)
+global ASM_PFX(gPatchSmiCr3)
+global ASM_PFX(gPatch5LevelPagingNeeded)
+global ASM_PFX(gcSmiHandlerTemplate)
+global ASM_PFX(gcSmiHandlerSize)
+
+extern ASM_PFX(mCetSupported)
+global ASM_PFX(mPatchCetSupported)
+global ASM_PFX(mPatchCetPl0Ssp)
+global ASM_PFX(mPatchCetInterruptSsp)
+global ASM_PFX(mPatchCetInterruptSspTable)
+
+ DEFAULT REL
+ SECTION .text
+
+BITS 16
+ASM_PFX(gcSmiHandlerTemplate):
+_SmiEntryPoint:
+ mov bx, _GdtDesc - _SmiEntryPoint + 0x8000
+ mov ax,[cs:DSC_OFFSET + DSC_GDTSIZ]
+ dec ax
+ mov [cs:bx], ax
+ mov eax, [cs:DSC_OFFSET + DSC_GDTPTR]
+ mov [cs:bx + 2], eax
+o32 lgdt [cs:bx] ; lgdt fword ptr cs:[bx]
+ mov ax, PROTECT_MODE_CS
+ mov [cs:bx-0x2],ax
+ mov edi, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmbase):
+ lea eax, [edi + (@ProtectedMode - _SmiEntryPoint) + 0x8000]
+ mov [cs:bx-0x6],eax
+ mov ebx, cr0
+ and ebx, 0x9ffafff3
+ or ebx, 0x23
+ mov cr0, ebx
+ jmp dword 0x0:0x0
+_GdtDesc:
+ DW 0
+ DD 0
+
+BITS 32
+@ProtectedMode:
+ mov ax, PROTECT_MODE_DS
+o16 mov ds, ax
+o16 mov es, ax
+o16 mov fs, ax
+o16 mov gs, ax
+o16 mov ss, ax
+ mov esp, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmiStack):
+ jmp ProtFlatMode
+
+BITS 64
+ProtFlatMode:
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmiCr3):
+ mov cr3, rax
+ mov eax, 0x668 ; as cr4.PGE is not set here, refresh cr3
+
+ mov cl, strict byte 0 ; source operand will be patched
+ASM_PFX(gPatch5LevelPagingNeeded):
+ cmp cl, 0
+ je SkipEnable5LevelPaging
+ ;
+ ; Enable 5-Level Paging bit
+ ;
+ bts eax, 12 ; Set LA57 bit (bit #12)
+SkipEnable5LevelPaging:
+
+ mov cr4, rax ; in PreModifyMtrrs() to flush TLB.
+; Load TSS
+ sub esp, 8 ; reserve room in stack
+ sgdt [rsp]
+ mov eax, [rsp + 2] ; eax = GDT base
+ add esp, 8
+ mov dl, 0x89
+ mov [rax + TSS_SEGMENT + 5], dl ; clear busy flag
+ mov eax, TSS_SEGMENT
+ ltr ax
+
+; enable NXE if supported
+ mov al, strict byte 1 ; source operand may be patched
+ASM_PFX(gPatchXdSupported):
+ cmp al, 0
+ jz @SkipXd
+
+; If MSR_IA32_MISC_ENABLE is supported, clear XD Disable bit
+ mov al, strict byte 1 ; source operand may be patched
+ASM_PFX(gPatchMsrIa32MiscEnableSupported):
+ cmp al, 1
+ jz MsrIa32MiscEnableSupported
+
+; MSR_IA32_MISC_ENABLE not supported
+ sub esp, 4
+ xor rdx, rdx
+ push rdx ; don't try to restore the XD Disable bit just before RSM
+ jmp EnableNxe
+
+;
+; Check XD disable bit
+;
+MsrIa32MiscEnableSupported:
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ sub esp, 4
+ push rdx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz EnableNxe
+ and dx, 0xFFFB ; clear XD Disable bit if it is set
+ wrmsr
+EnableNxe:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+ jmp @XdDone
+@SkipXd:
+ sub esp, 8
+@XdDone:
+
+; Switch into @LongMode
+ push LONG_MODE_CS ; push the hardcoded CS selector here
+ call Base ; push return address for retf later
+Base:
+ add dword [rsp], @LongMode - Base; offset for far retf, seg is the 1st arg
+
+ mov ecx, MSR_EFER
+ rdmsr
+ or ah, 1 ; enable LME
+ wrmsr
+ mov rbx, cr0
+ or ebx, 0x80010023 ; enable paging + WP + NE + MP + PE
+ mov cr0, rbx
+ retf
+@LongMode: ; long mode (64-bit code) starts here
+ mov rax, strict qword 0 ; mov rax, ASM_PFX(gSmiHandlerIdtr)
+SmiHandlerIdtrAbsAddr:
+ lidt [rax]
+ lea ebx, [rdi + DSC_OFFSET]
+ mov ax, [rbx + DSC_DS]
+ mov ds, eax
+ mov ax, [rbx + DSC_OTHERSEG]
+ mov es, eax
+ mov fs, eax
+ mov gs, eax
+ mov ax, [rbx + DSC_SS]
+ mov ss, eax
+
+ mov rbx, [rsp + 0x8] ; rbx <- CpuIndex
+
+; enable CET if supported
+ mov al, strict byte 1 ; source operand may be patched
+ASM_PFX(mPatchCetSupported):
+ cmp al, 0
+ jz CetDone
+
+ mov ecx, MSR_IA32_S_CET
+ rdmsr
+ push rdx
+ push rax
+
+ mov ecx, MSR_IA32_PL0_SSP
+ rdmsr
+ push rdx
+ push rax
+
+ mov ecx, MSR_IA32_INTERRUPT_SSP_TABLE_ADDR
+ rdmsr
+ push rdx
+ push rax
+
+ mov ecx, MSR_IA32_S_CET
+ mov eax, MSR_IA32_CET_SH_STK_EN
+ xor edx, edx
+ wrmsr
+
+ mov ecx, MSR_IA32_PL0_SSP
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(mPatchCetPl0Ssp):
+ xor edx, edx
+ wrmsr
+ mov rcx, cr0
+ btr ecx, 16 ; clear WP
+ mov cr0, rcx
+ mov [eax], eax ; reload SSP, and clear the busy flag.
+ xor ecx, ecx
+ mov [eax + 4], ecx
+
+ mov ecx, MSR_IA32_INTERRUPT_SSP_TABLE_ADDR
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(mPatchCetInterruptSspTable):
+ xor edx, edx
+ wrmsr
+
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(mPatchCetInterruptSsp):
+ cmp eax, 0
+ jz CetInterruptDone
+ mov [eax], eax ; reload SSP, and clear the busy flag.
+ xor ecx, ecx
+ mov [eax + 4], ecx
+CetInterruptDone:
+
+ mov rcx, cr0
+ bts ecx, 16 ; set WP
+ mov cr0, rcx
+
+ mov eax, 0x668 | CR4_CET
+ mov cr4, rax
+
+ SETSSBSY
+
+CetDone:
+
+ ;
+ ; Save FP registers
+ ;
+ sub rsp, 0x200
+ fxsave64 [rsp]
+
+ add rsp, -0x20
+
+ mov rcx, rbx
+ mov rax, strict qword 0 ; call ASM_PFX(CpuSmmDebugEntry)
+CpuSmmDebugEntryAbsAddr:
+ call rax
+
+ mov rcx, rbx
+ mov rax, strict qword 0 ; call ASM_PFX(SmiRendezvous)
+SmiRendezvousAbsAddr:
+ call rax
+
+ mov rcx, rbx
+ mov rax, strict qword 0 ; call ASM_PFX(CpuSmmDebugExit)
+CpuSmmDebugExitAbsAddr:
+ call rax
+
+ add rsp, 0x20
+
+ ;
+ ; Restore FP registers
+ ;
+ fxrstor64 [rsp]
+
+ add rsp, 0x200
+
+ mov rax, strict qword 0 ; mov rax, ASM_PFX(mCetSupported)
+mCetSupportedAbsAddr:
+ mov al, [rax]
+ cmp al, 0
+ jz CetDone2
+
+ mov eax, 0x668
+ mov cr4, rax ; disable CET
+
+ mov ecx, MSR_IA32_INTERRUPT_SSP_TABLE_ADDR
+ pop rax
+ pop rdx
+ wrmsr
+
+ mov ecx, MSR_IA32_PL0_SSP
+ pop rax
+ pop rdx
+ wrmsr
+
+ mov ecx, MSR_IA32_S_CET
+ pop rax
+ pop rdx
+ wrmsr
+CetDone2:
+
+ mov rax, strict qword 0 ; lea rax, [ASM_PFX(mXdSupported)]
+mXdSupportedAbsAddr:
+ mov al, [rax]
+ cmp al, 0
+ jz .1
+ pop rdx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz .1
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
+ wrmsr
+
+.1:
+
+ StuffRsb64
+ rsm
+
+ASM_PFX(gcSmiHandlerSize) DW $ - _SmiEntryPoint
+
+;
+; Retrieve the addresses and fill them into the mov opcodes.
+;
+; It is called first in the driver entry point and fixes up the real
+; addresses in the mov opcodes, so that the code still runs after it has
+; been copied to a different location.
+;
+global ASM_PFX(PiSmmCpuSmiEntryFixupAddress)
+ASM_PFX(PiSmmCpuSmiEntryFixupAddress):
+ lea rax, [ASM_PFX(gSmiHandlerIdtr)]
+ lea rcx, [SmiHandlerIdtrAbsAddr]
+ mov qword [rcx - 8], rax
+
+ lea rax, [ASM_PFX(CpuSmmDebugEntry)]
+ lea rcx, [CpuSmmDebugEntryAbsAddr]
+ mov qword [rcx - 8], rax
+
+ lea rax, [ASM_PFX(SmiRendezvous)]
+ lea rcx, [SmiRendezvousAbsAddr]
+ mov qword [rcx - 8], rax
+
+ lea rax, [ASM_PFX(CpuSmmDebugExit)]
+ lea rcx, [CpuSmmDebugExitAbsAddr]
+ mov qword [rcx - 8], rax
+
+ lea rax, [ASM_PFX(mXdSupported)]
+ lea rcx, [mXdSupportedAbsAddr]
+ mov qword [rcx - 8], rax
+
+ lea rax, [ASM_PFX(mCetSupported)]
+ lea rcx, [mCetSupportedAbsAddr]
+ mov qword [rcx - 8], rax
+ ret
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm
new file mode 100644
index 00000000..ec497190
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm
@@ -0,0 +1,378 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+; Module Name:
+;
+; SmiException.nasm
+;
+; Abstract:
+;
+; Exception handlers used in SM mode
+;
+;-------------------------------------------------------------------------------
+
+extern ASM_PFX(SmiPFHandler)
+
+global ASM_PFX(gcSmiIdtr)
+global ASM_PFX(gcSmiGdtr)
+global ASM_PFX(gcPsd)
+
+ SECTION .data
+
+NullSeg: DQ 0 ; reserved by architecture
+CodeSeg32:
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x9b
+ DB 0xcf ; LimitHigh
+ DB 0 ; BaseHigh
+ProtModeCodeSeg32:
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x9b
+ DB 0xcf ; LimitHigh
+ DB 0 ; BaseHigh
+ProtModeSsSeg32:
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x93
+ DB 0xcf ; LimitHigh
+ DB 0 ; BaseHigh
+DataSeg32:
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x93
+ DB 0xcf ; LimitHigh
+ DB 0 ; BaseHigh
+CodeSeg16:
+ DW -1
+ DW 0
+ DB 0
+ DB 0x9b
+ DB 0x8f
+ DB 0
+DataSeg16:
+ DW -1
+ DW 0
+ DB 0
+ DB 0x93
+ DB 0x8f
+ DB 0
+CodeSeg64:
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x9b
+ DB 0xaf ; LimitHigh
+ DB 0 ; BaseHigh
+; TSS Segment for X64 specially
+TssSeg:
+ DW TSS_DESC_SIZE ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x89
+ DB 0x80 ; LimitHigh
+ DB 0 ; BaseHigh
+ DD 0 ; BaseUpper
+ DD 0 ; Reserved
+GDT_SIZE equ $ - NullSeg
+
+; Create TSS Descriptor just after GDT
+TssDescriptor:
+ DD 0 ; Reserved
+ DQ 0 ; RSP0
+ DQ 0 ; RSP1
+ DQ 0 ; RSP2
+ DD 0 ; Reserved
+ DD 0 ; Reserved
+ DQ 0 ; IST1
+ DQ 0 ; IST2
+ DQ 0 ; IST3
+ DQ 0 ; IST4
+ DQ 0 ; IST5
+ DQ 0 ; IST6
+ DQ 0 ; IST7
+ DD 0 ; Reserved
+ DD 0 ; Reserved
+ DW 0 ; Reserved
+ DW 0 ; I/O Map Base Address
+TSS_DESC_SIZE equ $ - TssDescriptor
+
+;
+; This structure serves as a template for all processors.
+;
+ASM_PFX(gcPsd):
+ DB 'PSDSIG '
+ DW PSD_SIZE
+ DW 2
+ DW 1 << 2
+ DW CODE_SEL
+ DW DATA_SEL
+ DW DATA_SEL
+ DW DATA_SEL
+ DW 0
+ DQ 0
+ DQ 0
+ DQ 0 ; fixed in InitializeMpServiceData()
+ DQ NullSeg
+ DD GDT_SIZE
+ DD 0
+ times 24 DB 0
+ DQ 0
+PSD_SIZE equ $ - ASM_PFX(gcPsd)
+
+;
+; CODE & DATA segments for SMM runtime
+;
+CODE_SEL equ CodeSeg64 - NullSeg
+DATA_SEL equ DataSeg32 - NullSeg
+CODE32_SEL equ CodeSeg32 - NullSeg
+
+ASM_PFX(gcSmiGdtr):
+ DW GDT_SIZE - 1
+ DQ NullSeg
+
+ASM_PFX(gcSmiIdtr):
+ DW 0
+ DQ 0
+
+ DEFAULT REL
+ SECTION .text
+
+;------------------------------------------------------------------------------
+; _SmiExceptionEntryPoints is the collection of exception entrypoints followed
+; by a common exception handler.
+;
+; The stack frame, as specified in the Intel SDM, is laid out as follows:
+;
+; +---------------------+ <-- 16-byte aligned ensured by processor
+; + Old SS +
+; +---------------------+
+; + Old RSP +
+; +---------------------+
+; + RFlags +
+; +---------------------+
+; + CS +
+; +---------------------+
+; + RIP +
+; +---------------------+
+; + Error Code +
+; +---------------------+
+; + Vector Number +
+; +---------------------+
+; + RBP +
+; +---------------------+ <-- RBP, 16-byte aligned
+;
+; An RSP that is an odd multiple of 8 after the vector push means an error code is PRESENT
+;------------------------------------------------------------------------------
+global ASM_PFX(PageFaultIdtHandlerSmmProfile)
+ASM_PFX(PageFaultIdtHandlerSmmProfile):
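+ ; In 64-bit mode the CPU aligns RSP to 16 bytes, then pushes SS, RSP,
+ ; RFLAGS, CS, RIP and, for some vectors, an error code. After the
+ ; vector push below, RSP is an odd multiple of 8 only when an error
+ ; code was pushed; otherwise a dummy error code of 0 is inserted.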
+ push 0xe ; Page Fault
+ test spl, 8 ; odd multiple of 8 => ErrCode present
+ jnz .0
+ push qword [rsp] ; duplicate INT# if no ErrCode
+ mov qword [rsp + 8], 0
+.0:
+ push rbp
+ mov rbp, rsp
+
+ ;
+ ; The stack pointer is 16-byte aligned here, so the
+ ; EFI_FX_SAVE_STATE_X64 member of EFI_SYSTEM_CONTEXT_X64
+ ; is also 16-byte aligned
+ ;
+
+;; UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
+;; UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
+ push r15
+ push r14
+ push r13
+ push r12
+ push r11
+ push r10
+ push r9
+ push r8
+ push rax
+ push rcx
+ push rdx
+ push rbx
+ push qword [rbp + 48] ; RSP
+ push qword [rbp] ; RBP
+ push rsi
+ push rdi
+
+;; UINT64 Gs, Fs, Es, Ds, Cs, Ss; ensure the upper bits of each are zero
+ movzx rax, word [rbp + 56]
+ push rax ; for ss
+ movzx rax, word [rbp + 32]
+ push rax ; for cs
+ mov rax, ds
+ push rax
+ mov rax, es
+ push rax
+ mov rax, fs
+ push rax
+ mov rax, gs
+ push rax
+
+;; UINT64 Rip;
+ push qword [rbp + 24]
+
+;; UINT64 Gdtr[2], Idtr[2];
+ sub rsp, 16
+ sidt [rsp]
+ sub rsp, 16
+ sgdt [rsp]
+
+;; UINT64 Ldtr, Tr;
+ xor rax, rax
+ str ax
+ push rax
+ sldt ax
+ push rax
+
+;; UINT64 RFlags;
+ push qword [rbp + 40]
+
+;; UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
+ mov rax, cr8
+ push rax
+ mov rax, cr4
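+ ; set CR4.DE (BIT3) and CR4.OSFXSR (BIT9) so that the debug-register
+ ; context and the XMM state saved by fxsave below are valid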
+ or rax, 0x208
+ mov cr4, rax
+ push rax
+ mov rax, cr3
+ push rax
+ mov rax, cr2
+ push rax
+ xor rax, rax
+ push rax
+ mov rax, cr0
+ push rax
+
+;; UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+ mov rax, dr7
+ push rax
+ mov rax, dr6
+ push rax
+ mov rax, dr3
+ push rax
+ mov rax, dr2
+ push rax
+ mov rax, dr1
+ push rax
+ mov rax, dr0
+ push rax
+
+;; FX_SAVE_STATE_X64 FxSaveState;
+
+ sub rsp, 512
+ mov rdi, rsp
+ fxsave [rdi]
+
+; The UEFI calling convention for x64 requires that the Direction flag in EFLAGS be clear
+ cld
+
+;; UINT64 ExceptionData;
+ push qword [rbp + 16]
+
+;; call into exception handler
+ mov rcx, [rbp + 8]
+ lea rax, [ASM_PFX(SmiPFHandler)]
+
+;; Prepare parameter and call
+ mov rdx, rsp
+ ;
+ ; Per X64 calling convention, allocate maximum parameter stack space
+ ; and make sure RSP is 16-byte aligned
+ ;
+ sub rsp, 4 * 8 + 8
+ call rax
+ add rsp, 4 * 8 + 8
+ jmp .1
+
+.1:
+;; UINT64 ExceptionData;
+ add rsp, 8
+
+;; FX_SAVE_STATE_X64 FxSaveState;
+
+ mov rsi, rsp
+ fxrstor [rsi]
+ add rsp, 512
+
+;; UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+;; Skip restoration of DRx registers to support debuggers
+;; that set breakpoints in interrupt/exception context
+ add rsp, 8 * 6
+
+;; UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
+ pop rax
+ mov cr0, rax
+ add rsp, 8 ; not for Cr1
+ pop rax
+ mov cr2, rax
+ pop rax
+ mov cr3, rax
+ pop rax
+ mov cr4, rax
+ pop rax
+ mov cr8, rax
+
+;; UINT64 RFlags;
+ pop qword [rbp + 40]
+
+;; UINT64 Ldtr, Tr;
+;; UINT64 Gdtr[2], Idtr[2];
+;; Best not let anyone mess with these particular registers...
+ add rsp, 48
+
+;; UINT64 Rip;
+ pop qword [rbp + 24]
+
+;; UINT64 Gs, Fs, Es, Ds, Cs, Ss;
+ pop rax
+ ; mov gs, rax ; not for gs
+ pop rax
+ ; mov fs, rax ; not for fs
+ ; (X64 does not use fs and gs here, so we do not restore them)
+ pop rax
+ mov es, rax
+ pop rax
+ mov ds, rax
+ pop qword [rbp + 32] ; for cs
+ pop qword [rbp + 56] ; for ss
+
+;; UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
+;; UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
+ pop rdi
+ pop rsi
+ add rsp, 8 ; not for rbp
+ pop qword [rbp + 48] ; for rsp
+ pop rbx
+ pop rdx
+ pop rcx
+ pop rax
+ pop r8
+ pop r9
+ pop r10
+ pop r11
+ pop r12
+ pop r13
+ pop r14
+ pop r15
+
+ mov rsp, rbp
+
+; Set TF in the saved RFLAGS so a single-step (#DB) exception fires after
+; the faulting instruction is retried, allowing the original page table
+; entries to be restored
+ bts dword [rsp + 40], 8 ; RFLAGS
+
+ pop rbp
+ add rsp, 16 ; skip INT# & ErrCode
+ iretq
+
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c
new file mode 100644
index 00000000..1f30c6d4
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c
@@ -0,0 +1,218 @@
+/** @file
+ SMM CPU misc functions for x64 arch specific.
+
+Copyright (c) 2015 - 2019, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+EFI_PHYSICAL_ADDRESS mGdtBuffer;
+UINTN mGdtBufferSize;
+
+extern BOOLEAN mCetSupported;
+extern UINTN mSmmShadowStackSize;
+
+X86_ASSEMBLY_PATCH_LABEL mPatchCetPl0Ssp;
+X86_ASSEMBLY_PATCH_LABEL mPatchCetInterruptSsp;
+X86_ASSEMBLY_PATCH_LABEL mPatchCetInterruptSspTable;
+UINT32 mCetPl0Ssp;
+UINT32 mCetInterruptSsp;
+UINT32 mCetInterruptSspTable;
+
+UINTN mSmmInterruptSspTables;
+
+/**
+ Initialize IDT for SMM Stack Guard.
+
+**/
+VOID
+EFIAPI
+InitializeIDTSmmStackGuard (
+ VOID
+ )
+{
+ IA32_IDT_GATE_DESCRIPTOR *IdtGate;
+
+ //
+ // If SMM Stack Guard feature is enabled, set the IST field of
+ // the interrupt gate for Page Fault Exception to be 1
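+ // (the IST index occupies the low 3 bits of the Reserved_0 byte of the
+ // X64 gate descriptor, so writing 1 selects IST1)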
+ //
+ IdtGate = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
+ IdtGate += EXCEPT_IA32_PAGE_FAULT;
+ IdtGate->Bits.Reserved_0 = 1;
+}
+
+/**
+ Initialize Gdt for all processors.
+
+ @param[in] Cr3 CR3 value.
+ @param[out] GdtStepSize The step size for GDT table.
+
+ @return GdtBase for processor 0.
+ GdtBase for processor X is: GdtBase + (GdtStepSize * X)
+**/
+VOID *
+InitGdt (
+ IN UINTN Cr3,
+ OUT UINTN *GdtStepSize
+ )
+{
+ UINTN Index;
+ IA32_SEGMENT_DESCRIPTOR *GdtDescriptor;
+ UINTN TssBase;
+ UINTN GdtTssTableSize;
+ UINT8 *GdtTssTables;
+ UINTN GdtTableStepSize;
+
+ //
+ // For X64 SMM, we allocate a separate GDT/TSS for each CPU to avoid TSS load contention
+ // on each SMI entry.
+ //
+ GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE + 7) & ~7; // 8-byte aligned
+ mGdtBufferSize = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
+ GdtTssTables = (UINT8*)AllocateCodePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
+ ASSERT (GdtTssTables != NULL);
+ mGdtBuffer = (UINTN)GdtTssTables;
+ GdtTableStepSize = GdtTssTableSize;
+
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
+ CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE);
+
+ //
+ // Fixup TSS descriptors
+ //
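+ // The 16-byte X64 TSS descriptor occupies the last two 8-byte GDT
+ // slots, immediately before the TSS itself, hence the "- 2" below.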
+ TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);
+ GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;
+ GdtDescriptor->Bits.BaseLow = (UINT16)(UINTN)TssBase;
+ GdtDescriptor->Bits.BaseMid = (UINT8)((UINTN)TssBase >> 16);
+ GdtDescriptor->Bits.BaseHigh = (UINT8)((UINTN)TssBase >> 24);
+
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ //
+ // Setup top of known good stack as IST1 for each processor.
+ //
+ *(UINTN *)(TssBase + TSS_X64_IST1_OFFSET) = (mSmmStackArrayBase + EFI_PAGE_SIZE + Index * (mSmmStackSize + mSmmShadowStackSize));
+ }
+ }
+
+ *GdtStepSize = GdtTableStepSize;
+ return GdtTssTables;
+}
+
+/**
+ Get Protected mode code segment from current GDT table.
+
+ @return Protected mode code segment value.
+**/
+UINT16
+GetProtectedModeCS (
+ VOID
+ )
+{
+ IA32_DESCRIPTOR GdtrDesc;
+ IA32_SEGMENT_DESCRIPTOR *GdtEntry;
+ UINTN GdtEntryCount;
+ UINT16 Index;
+
+ AsmReadGdtr (&GdtrDesc);
+ GdtEntryCount = (GdtrDesc.Limit + 1) / sizeof (IA32_SEGMENT_DESCRIPTOR);
+ GdtEntry = (IA32_SEGMENT_DESCRIPTOR *) GdtrDesc.Base;
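+ //
+ // Search for a 32-bit code segment: L clear, D/B set, and an
+ // executable type (Type > 8).
+ //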
+ for (Index = 0; Index < GdtEntryCount; Index++) {
+ if (GdtEntry->Bits.L == 0) {
+ if (GdtEntry->Bits.Type > 8 && GdtEntry->Bits.DB == 1) {
+ break;
+ }
+ }
+ GdtEntry++;
+ }
+ ASSERT (Index != GdtEntryCount);
+ return Index * 8;
+}
+
+/**
+ Transfer an AP to a safe hlt-loop after it has finished restoring CPU
+ features on the S3 resume path.
+
+ @param[in] ApHltLoopCode The address of the safe hlt-loop function.
+ @param[in] TopOfStack A pointer to the new stack to use for the ApHltLoopCode.
+ @param[in] NumberToFinishAddress Address of the semaphore counting APs that have finished.
+
+**/
+VOID
+TransferApToSafeState (
+ IN UINTN ApHltLoopCode,
+ IN UINTN TopOfStack,
+ IN UINTN NumberToFinishAddress
+ )
+{
+ AsmDisablePaging64 (
+ GetProtectedModeCS (),
+ (UINT32)ApHltLoopCode,
+ (UINT32)NumberToFinishAddress,
+ 0,
+ (UINT32)TopOfStack
+ );
+ //
+ // It should never reach here
+ //
+ ASSERT (FALSE);
+}
+
+/**
+ Initialize the shadow stack related data structure.
+
+ @param CpuIndex The index of CPU.
+ @param ShadowStack The bottom of the shadow stack for this CPU.
+**/
+VOID
+InitShadowStack (
+ IN UINTN CpuIndex,
+ IN VOID *ShadowStack
+ )
+{
+ UINTN SmmShadowStackSize;
+ UINT64 *InterruptSspTable;
+ UINT32 InterruptSsp;
+
+ if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
+ SmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ SmmShadowStackSize += EFI_PAGES_TO_SIZE (2);
+ }
+ mCetPl0Ssp = (UINT32)((UINTN)ShadowStack + SmmShadowStackSize - sizeof(UINT64));
+ PatchInstructionX86 (mPatchCetPl0Ssp, mCetPl0Ssp, 4);
+ DEBUG ((DEBUG_INFO, "mCetPl0Ssp - 0x%x\n", mCetPl0Ssp));
+ DEBUG ((DEBUG_INFO, "ShadowStack - 0x%x\n", ShadowStack));
+ DEBUG ((DEBUG_INFO, " SmmShadowStackSize - 0x%x\n", SmmShadowStackSize));
+
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ if (mSmmInterruptSspTables == 0) {
+ mSmmInterruptSspTables = (UINTN)AllocateZeroPool(sizeof(UINT64) * 8 * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
+ ASSERT (mSmmInterruptSspTables != 0);
+ DEBUG ((DEBUG_INFO, "mSmmInterruptSspTables - 0x%x\n", mSmmInterruptSspTables));
+ }
+
+ //
+ // The highest address on the stack (0xFF8) holds a save-previous-ssp token pointing to a location 40 bytes away (0xFD0).
+ // The supervisor shadow stack token sits next to it at address 0xFF0, and this is where the interrupt SSP table points.
+ // When an interrupt or exception occurs, SAVEPREVSSP/RSTORSSP/CLRSSBSY can then be used on the supervisor shadow stack,
+ // because RETF in the SMM exception handler cannot clear the BUSY flag at the same CPL
+ // (only IRET, or RETF to a different CPL, can clear the BUSY flag).
+ // Please refer to UefiCpuPkg/Library/CpuExceptionHandlerLib/X64 for the full stack frame at runtime.
+ //
+ InterruptSsp = (UINT32)((UINTN)ShadowStack + EFI_PAGES_TO_SIZE(1) - sizeof(UINT64));
+ *(UINT32 *)(UINTN)InterruptSsp = (InterruptSsp - sizeof(UINT64) * 4) | 0x2;
+ mCetInterruptSsp = InterruptSsp - sizeof(UINT64);
+
+ mCetInterruptSspTable = (UINT32)(UINTN)(mSmmInterruptSspTables + sizeof(UINT64) * 8 * CpuIndex);
+ InterruptSspTable = (UINT64 *)(UINTN)mCetInterruptSspTable;
+ InterruptSspTable[1] = mCetInterruptSsp;
+ PatchInstructionX86 (mPatchCetInterruptSsp, mCetInterruptSsp, 4);
+ PatchInstructionX86 (mPatchCetInterruptSspTable, mCetInterruptSspTable, 4);
+ DEBUG ((DEBUG_INFO, "mCetInterruptSsp - 0x%x\n", mCetInterruptSsp));
+ DEBUG ((DEBUG_INFO, "mCetInterruptSspTable - 0x%x\n", mCetInterruptSspTable));
+ }
+ }
+}
+
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm
new file mode 100644
index 00000000..1c5648ee
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm
@@ -0,0 +1,146 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+; Module Name:
+;
+; SmmInit.nasm
+;
+; Abstract:
+;
+; Functions for relocating the SMBASEs of all processors
+;
+;-------------------------------------------------------------------------------
+
+%include "StuffRsbNasm.inc"
+
+extern ASM_PFX(SmmInitHandler)
+extern ASM_PFX(mRebasedFlag)
+extern ASM_PFX(mSmmRelocationOriginalAddress)
+
+global ASM_PFX(gPatchSmmCr3)
+global ASM_PFX(gPatchSmmCr4)
+global ASM_PFX(gPatchSmmCr0)
+global ASM_PFX(gPatchSmmInitStack)
+global ASM_PFX(gcSmiInitGdtr)
+global ASM_PFX(gcSmmInitSize)
+global ASM_PFX(gcSmmInitTemplate)
+global ASM_PFX(gPatchRebasedFlagAddr32)
+global ASM_PFX(gPatchSmmRelocationOriginalAddressPtr32)
+
+%define LONG_MODE_CS 0x38
+
+ DEFAULT REL
+ SECTION .text
+
+ASM_PFX(gcSmiInitGdtr):
+ DW 0
+ DQ 0
+
+global ASM_PFX(SmmStartup)
+
+BITS 16
+ASM_PFX(SmmStartup):
+ mov eax, 0x80000001 ; read extended feature flags (CPUID leaf 0x80000001)
+ cpuid
+ mov ebx, edx ; rdmsr will change edx. keep it in ebx.
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmmCr3):
+ mov cr3, eax
+o32 lgdt [cs:ebp + (ASM_PFX(gcSmiInitGdtr) - ASM_PFX(SmmStartup))]
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmmCr4):
+ or ah, 2 ; set CR4.OSFXSR to enable XMM register access
+ mov cr4, eax
+ mov ecx, 0xc0000080 ; IA32_EFER MSR
+ rdmsr
+ or ah, BIT0 ; set LME bit
+ test ebx, BIT20 ; check NXE capability
+ jz .1
+ or ah, BIT3 ; set NXE bit
+.1:
+ wrmsr
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmmCr0):
+ mov cr0, eax ; enable protected mode & paging
+ jmp LONG_MODE_CS : dword 0 ; offset will be patched to @LongMode
+@PatchLongModeOffset:
+
+BITS 64
+@LongMode: ; long-mode starts here
+ mov rsp, strict qword 0 ; source operand will be patched
+ASM_PFX(gPatchSmmInitStack):
+ and sp, 0xfff0 ; make sure RSP is 16-byte aligned
+ ;
+ ; According to X64 calling convention, XMM0~5 are volatile, we need to save
+ ; them before calling C-function.
+ ;
+ sub rsp, 0x60
+ movdqa [rsp], xmm0
+ movdqa [rsp + 0x10], xmm1
+ movdqa [rsp + 0x20], xmm2
+ movdqa [rsp + 0x30], xmm3
+ movdqa [rsp + 0x40], xmm4
+ movdqa [rsp + 0x50], xmm5
+
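+ ;
+ ; Allocate the 32-byte shadow space required by the X64 calling
+ ; convention before calling the C function.
+ ;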
+ add rsp, -0x20
+ call ASM_PFX(SmmInitHandler)
+ add rsp, 0x20
+
+ ;
+ ; Restore XMM0~5 after calling C-function.
+ ;
+ movdqa xmm0, [rsp]
+ movdqa xmm1, [rsp + 0x10]
+ movdqa xmm2, [rsp + 0x20]
+ movdqa xmm3, [rsp + 0x30]
+ movdqa xmm4, [rsp + 0x40]
+ movdqa xmm5, [rsp + 0x50]
+
+ StuffRsb64
+ rsm
+
+BITS 16
+ASM_PFX(gcSmmInitTemplate):
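+; The first SMI on each CPU starts executing this template at
+; SMBASE + 0x8000. @L1 below is patched with the flat address of
+; SmmStartup; subtracting 0x30000 (the default SMBASE in effect during
+; relocation) converts it into an offset within the SMM code segment.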
+ mov ebp, [cs:@L1 - ASM_PFX(gcSmmInitTemplate) + 0x8000]
+ sub ebp, 0x30000
+ jmp ebp
+@L1:
+ DQ 0; ASM_PFX(SmmStartup)
+
+ASM_PFX(gcSmmInitSize): DW $ - ASM_PFX(gcSmmInitTemplate)
+
+BITS 64
+global ASM_PFX(SmmRelocationSemaphoreComplete)
+ASM_PFX(SmmRelocationSemaphoreComplete):
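+ ; Set mRebasedFlag to signal that this CPU's SMBASE relocation is
+ ; complete, then continue at the saved original handler address.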
+ push rax
+ mov rax, [ASM_PFX(mRebasedFlag)]
+ mov byte [rax], 1
+ pop rax
+ jmp [ASM_PFX(mSmmRelocationOriginalAddress)]
+
+;
+; Semaphore code running in 32-bit mode
+;
+BITS 32
+global ASM_PFX(SmmRelocationSemaphoreComplete32)
+ASM_PFX(SmmRelocationSemaphoreComplete32):
+ push eax
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchRebasedFlagAddr32):
+ mov byte [eax], 1
+ pop eax
+ jmp dword [dword 0] ; destination will be patched
+ASM_PFX(gPatchSmmRelocationOriginalAddressPtr32):
+
+BITS 64
+global ASM_PFX(PiSmmCpuSmmInitFixupAddress)
+ASM_PFX(PiSmmCpuSmmInitFixupAddress):
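+ ; The far jump's 4-byte offset immediately precedes its 2-byte selector,
+ ; so the offset field ends 6 bytes before @PatchLongModeOffset.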
+ lea rax, [@LongMode]
+ lea rcx, [@PatchLongModeOffset - 6]
+ mov dword [rcx], eax
+
+ lea rax, [ASM_PFX(SmmStartup)]
+ lea rcx, [@L1]
+ mov qword [rcx], rax
+ ret
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
new file mode 100644
index 00000000..4e5f9d30
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
@@ -0,0 +1,333 @@
+/** @file
+X64 processor specific functions to enable SMM profile.
+
+Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
+
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+#include "SmmProfileInternal.h"
+
+//
+// Current page index.
+//
+UINTN mPFPageIndex;
+
+//
+// Pool for dynamically creating page table in page fault handler.
+//
+UINT64 mPFPageBuffer;
+
+//
+// Store the uplink information for each page being used.
+//
+UINT64 *mPFPageUplink[MAX_PF_PAGE_COUNT];
+
+/**
+ Create SMM page table for S3 path.
+
+**/
+VOID
+InitSmmS3Cr3 (
+ VOID
+ )
+{
+ EFI_PHYSICAL_ADDRESS Pages;
+ UINT64 *PTEntry;
+
+ //
+ // Generate PAE page table for the first 4GB memory space
+ //
+ Pages = Gen4GPageTable (FALSE);
+
+ //
+ // Fill Page-Table-Level4 (PML4) entry
+ //
+ PTEntry = (UINT64*)AllocatePageTableMemory (1);
+ ASSERT (PTEntry != NULL);
+ *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));
+
+ //
+ // Return the address of PML4 (to set CR3)
+ //
+ mSmmS3ResumeState->SmmS3Cr3 = (UINT32)(UINTN)PTEntry;
+
+ return ;
+}
+
+/**
+ Allocate pages used for splitting a 2MB page into 4KB pages when a page fault happens.
+
+**/
+VOID
+InitPagesForPFHandler (
+ VOID
+ )
+{
+ VOID *Address;
+
+ //
+ // Pre-allocate memory for the page fault handler
+ //
+ Address = AllocatePages (MAX_PF_PAGE_COUNT);
+ ASSERT (Address != NULL);
+
+ mPFPageBuffer = (UINT64)(UINTN) Address;
+ mPFPageIndex = 0;
+ ZeroMem ((VOID *) (UINTN) mPFPageBuffer, EFI_PAGE_SIZE * MAX_PF_PAGE_COUNT);
+ ZeroMem (mPFPageUplink, sizeof (mPFPageUplink));
+
+ return;
+}
+
+/**
+ Allocate one page used for splitting a 2MB page into 4KB pages.
+
+ @param Uplink The address of Page-Directory entry.
+
+**/
+VOID
+AcquirePage (
+ UINT64 *Uplink
+ )
+{
+ UINT64 Address;
+
+ //
+ // Get the buffer
+ //
+ Address = mPFPageBuffer + EFI_PAGES_TO_SIZE (mPFPageIndex);
+ ZeroMem ((VOID *) (UINTN) Address, EFI_PAGE_SIZE);
+
+ //
+ // Cut the previous uplink if it exists and wasn't overwritten
+ //
+ if ((mPFPageUplink[mPFPageIndex] != NULL) && ((*mPFPageUplink[mPFPageIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK) == Address)) {
+ *mPFPageUplink[mPFPageIndex] = 0;
+ }
+
+ //
+ // Link & Record the current uplink
+ //
+ *Uplink = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ mPFPageUplink[mPFPageIndex] = Uplink;
+
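+ //
+ // Pages are recycled round-robin; the uplink bookkeeping above unlinks
+ // a page from its previous owner before it is handed out again.
+ //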
+ mPFPageIndex = (mPFPageIndex + 1) % MAX_PF_PAGE_COUNT;
+}
+
+/**
+ Update the page table to map the memory correctly so that the instruction
+ which caused the page fault can execute successfully. It also saves the
+ original page table entry so it can be restored in the single-step exception.
+
+ @param PageTable PageTable Address.
+ @param PFAddress The memory address which caused the page fault exception.
+ @param CpuIndex The index of the processor.
+ @param ErrorCode The error code of the exception.
+ @param IsValidPFAddress The flag indicating whether SMM profile data needs to be added.
+
+**/
+VOID
+RestorePageTableAbove4G (
+ UINT64 *PageTable,
+ UINT64 PFAddress,
+ UINTN CpuIndex,
+ UINTN ErrorCode,
+ BOOLEAN *IsValidPFAddress
+ )
+{
+ UINTN PTIndex;
+ UINT64 Address;
+ BOOLEAN Nx;
+ BOOLEAN Existed;
+ UINTN Index;
+ UINTN PFIndex;
+ IA32_CR4 Cr4;
+ BOOLEAN Enable5LevelPaging;
+
+ ASSERT ((PageTable != NULL) && (IsValidPFAddress != NULL));
+
+ Cr4.UintN = AsmReadCr4 ();
+ Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
+
+ //
+ // Handle a page fault address above 4GB.
+ //
+
+ //
+ // Check whether the page fault address already exists in the page table.
+ // If it does but a page fault was still generated, there are two possible
+ // reasons: 1. the present flag is 0; 2. an instruction fetch in a
+ // protected memory range.
+ //
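+ //
+ // The linear address is split into 9-bit table indexes: PML5E bits 56:48,
+ // PML4E bits 47:39, PDPTE bits 38:30, PDE bits 29:21 and PTE bits 20:12.
+ //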
+ Existed = FALSE;
+ PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
+ PTIndex = 0;
+ if (Enable5LevelPaging) {
+ PTIndex = BitFieldRead64 (PFAddress, 48, 56);
+ }
+ if ((!Enable5LevelPaging) || ((PageTable[PTIndex] & IA32_PG_P) != 0)) {
+ // PML5E
+ if (Enable5LevelPaging) {
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ }
+ PTIndex = BitFieldRead64 (PFAddress, 39, 47);
+ if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
+ // PML4E
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ PTIndex = BitFieldRead64 (PFAddress, 30, 38);
+ if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
+ // PDPTE
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ PTIndex = BitFieldRead64 (PFAddress, 21, 29);
+ // PD
+ if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
+ //
+ // 2MB page
+ //
+ Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ if ((Address & ~((1ull << 21) - 1)) == ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) {
+ Existed = TRUE;
+ }
+ } else {
+ //
+ // 4KB page
+ //
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ if (PageTable != 0) {
+ //
+ // When there is a valid entry to map to 4KB page, need not create a new entry to map 2MB.
+ //
+ PTIndex = BitFieldRead64 (PFAddress, 12, 20);
+ Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ if ((Address & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
+ Existed = TRUE;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ //
+ // If the page entry does not exist in the page table at all, create a new entry.
+ //
+ if (!Existed) {
+
+ if (IsAddressValid (PFAddress, &Nx)) {
+ //
+ // The page fault address above 4GB is in a protected range but still
+ // caused a page fault. A page entry will be created for it, marked
+ // present/read-write and execution-disabled; this access is not saved
+ // into the SMM profile data.
+ //
+ *IsValidPFAddress = TRUE;
+ }
+
+ //
+ // Create one entry in page table for page fault address.
+ //
+ SmiDefaultPFHandler ();
+ //
+ // Find the page table entry created just now.
+ //
+ PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
+ PFAddress = AsmReadCr2 ();
+ // PML5E
+ if (Enable5LevelPaging) {
+ PTIndex = BitFieldRead64 (PFAddress, 48, 56);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ }
+ // PML4E
+ PTIndex = BitFieldRead64 (PFAddress, 39, 47);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ // PDPTE
+ PTIndex = BitFieldRead64 (PFAddress, 30, 38);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ // PD
+ PTIndex = BitFieldRead64 (PFAddress, 21, 29);
+ Address = PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK;
+ //
+ // Check if the 2MB-page entry needs to be split into 4KB-page entries.
+ //
+ if (IsAddressSplit (Address)) {
+ AcquirePage (&PageTable[PTIndex]);
+
+ // PTE
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ for (Index = 0; Index < 512; Index++) {
+ PageTable[Index] = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ if (!IsAddressValid (Address, &Nx)) {
+ PageTable[Index] = PageTable[Index] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
+ }
+ if (Nx && mXdSupported) {
+ PageTable[Index] = PageTable[Index] | IA32_PG_NX;
+ }
+ if (Address == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
+ PTIndex = Index;
+ }
+ Address += SIZE_4KB;
+ } // end for PT
+ } else {
+ //
+ // Update 2MB page entry.
+ //
+ if (!IsAddressValid (Address, &Nx)) {
+ //
+ // Patch to remove present flag and rw flag.
+ //
+ PageTable[PTIndex] = PageTable[PTIndex] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
+ }
+ //
+ // Set XD bit to 1
+ //
+ if (Nx && mXdSupported) {
+ PageTable[PTIndex] = PageTable[PTIndex] | IA32_PG_NX;
+ }
+ }
+ }
+
+ //
+ // Record the old entries so they can later be restored to non-present status.
+ // They cover both the memory the instruction resides in and the memory it accesses.
+ //
+ ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
+ if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
+ PFIndex = mPFEntryCount[CpuIndex];
+ mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
+ mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
+ mPFEntryCount[CpuIndex]++;
+ }
+
+ //
+ // Set the present flag, or clear the XD flag, so the faulting access succeeds when retried.
+ //
+ PageTable[PTIndex] |= (UINT64)(PAGE_ATTRIBUTE_BITS);
+ if ((ErrorCode & IA32_PF_EC_ID) != 0) {
+ //
+ // If page fault is caused by instruction fetch, clear XD bit in the entry.
+ //
+ PageTable[PTIndex] &= ~IA32_PG_NX;
+ }
+
+ return;
+}
+
+/**
+ Clear TF in FLAGS.
+
+ @param SystemContext A pointer to the processor context when
+ the interrupt occurred on the processor.
+
+**/
+VOID
+ClearTrapFlag (
+ IN OUT EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ SystemContext.SystemContextX64->Rflags &= (UINTN) ~BIT8;
+}
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.h b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.h
new file mode 100644
index 00000000..36a1c287
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.h
@@ -0,0 +1,99 @@
+/** @file
+X64 processor specific header file to enable SMM profile.
+
+Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef _SMM_PROFILE_ARCH_H_
+#define _SMM_PROFILE_ARCH_H_
+
+#pragma pack (1)
+
+typedef struct _MSR_DS_AREA_STRUCT {
+ UINT64 BTSBufferBase;
+ UINT64 BTSIndex;
+ UINT64 BTSAbsoluteMaximum;
+ UINT64 BTSInterruptThreshold;
+ UINT64 PEBSBufferBase;
+ UINT64 PEBSIndex;
+ UINT64 PEBSAbsoluteMaximum;
+ UINT64 PEBSInterruptThreshold;
+ UINT64 PEBSCounterReset[2];
+ UINT64 Reserved;
+} MSR_DS_AREA_STRUCT;
+
+typedef struct _BRANCH_TRACE_RECORD {
+ UINT64 LastBranchFrom;
+ UINT64 LastBranchTo;
+ UINT64 Rsvd0 : 4;
+ UINT64 BranchPredicted : 1;
+ UINT64 Rsvd1 : 59;
+} BRANCH_TRACE_RECORD;
+
+typedef struct _PEBS_RECORD {
+ UINT64 Rflags;
+ UINT64 LinearIP;
+ UINT64 Rax;
+ UINT64 Rbx;
+ UINT64 Rcx;
+ UINT64 Rdx;
+ UINT64 Rsi;
+ UINT64 Rdi;
+ UINT64 Rbp;
+ UINT64 Rsp;
+ UINT64 R8;
+ UINT64 R9;
+ UINT64 R10;
+ UINT64 R11;
+ UINT64 R12;
+ UINT64 R13;
+ UINT64 R14;
+ UINT64 R15;
+} PEBS_RECORD;
+
+#pragma pack ()
+
+#define PHYSICAL_ADDRESS_MASK ((1ull << 52) - SIZE_4KB)
+
+/**
+ Update the page table to map the memory correctly so that the instruction
+ which caused the page fault can execute successfully. It also saves the
+ original page table entry so it can be restored in the single-step exception.
+
+ @param PageTable PageTable Address.
+ @param PFAddress The memory address which caused the page fault exception.
+ @param CpuIndex The index of the processor.
+ @param ErrorCode The error code of the exception.
+ @param IsValidPFAddress The flag indicating whether SMM profile data needs to be added.
+
+**/
+VOID
+RestorePageTableAbove4G (
+ UINT64 *PageTable,
+ UINT64 PFAddress,
+ UINTN CpuIndex,
+ UINTN ErrorCode,
+ BOOLEAN *IsValidPFAddress
+ );
+
+/**
+ Create SMM page table for S3 path.
+
+**/
+VOID
+InitSmmS3Cr3 (
+ VOID
+ );
+
+/**
+ Allocate pages used for splitting a 2MB page into 4KB pages when a page fault happens.
+
+**/
+VOID
+InitPagesForPFHandler (
+ VOID
+ );
+
+#endif // _SMM_PROFILE_ARCH_H_