path: root/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64
Diffstat (limited to 'src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64')
-rw-r--r--   src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Cet.nasm              34
-rw-r--r--   src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/MpFuncs.nasm         189
-rw-r--r--   src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c           1325
-rw-r--r--   src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Semaphore.c           69
-rw-r--r--   src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm        396
-rw-r--r--   src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm    378
-rw-r--r--   src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c       218
-rw-r--r--   src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm         146
-rw-r--r--   src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c     333
-rw-r--r--   src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.h      99
10 files changed, 3187 insertions, 0 deletions
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Cet.nasm b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Cet.nasm
new file mode 100644
index 00000000..99172364
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Cet.nasm
@@ -0,0 +1,34 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2019, Intel Corporation. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+;-------------------------------------------------------------------------------
+
+%include "Nasm.inc"
+
+DEFAULT REL
+SECTION .text
+
+global ASM_PFX(DisableCet)
+ASM_PFX(DisableCet):
+
+ ; Skip the pushed data for call
+ mov rax, 1
+ INCSSP_RAX
+
+ mov rax, cr4
+ btr eax, 23 ; clear CET
+ mov cr4, rax
+ ret
+
+global ASM_PFX(EnableCet)
+ASM_PFX(EnableCet):
+
+ mov rax, cr4
+ bts eax, 23 ; set CET
+ mov cr4, rax
+
+ ; use jmp to skip the check for ret
+ pop rax
+ jmp rax
+
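Note on Cet.nasm: DisableCet executes INCSSP with RAX = 1 to pop the shadow-stack return address pushed by its caller's CALL before clearing CR4.CET (bit 23), and EnableCet returns with POP/JMP instead of RET because, with CET just re-enabled, a RET would be checked against a shadow stack that never recorded the matching CALL. A minimal C usage sketch, mirroring how SetPageTableAttributes() in PageTbl.c later in this patch brackets writes to read-only page-table memory (WriteReadOnlyPageTableMemory is a hypothetical name; DisableCet/EnableCet, CR0_WP and CR4_CET_ENABLE are the symbols used by PageTbl.c):

  VOID EFIAPI DisableCet (VOID);
  VOID EFIAPI EnableCet  (VOID);

  VOID
  WriteReadOnlyPageTableMemory (
    VOID
    )
  {
    BOOLEAN  CetEnabled;

    CetEnabled = ((AsmReadCr4 () & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
    if (CetEnabled) {
      //
      // CET must be disabled before CR0.WP is cleared.
      //
      DisableCet ();
    }
    AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);

    //
    // ... write the protected page-table memory here ...
    //

    AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
    if (CetEnabled) {
      EnableCet ();
    }
  }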
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/MpFuncs.nasm b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/MpFuncs.nasm
new file mode 100644
index 00000000..3eff0772
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/MpFuncs.nasm
@@ -0,0 +1,189 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+; Module Name:
+;
+; MpFuncs.nasm
+;
+; Abstract:
+;
+; This is the assembly code for Multi-processor S3 support
+;
+;-------------------------------------------------------------------------------
+
+%define VacantFlag 0x0
+%define NotVacantFlag 0xff
+
+%define LockLocation RendezvousFunnelProcEnd - RendezvousFunnelProcStart
+%define StackStartAddressLocation LockLocation + 0x8
+%define StackSizeLocation LockLocation + 0x10
+%define CProcedureLocation LockLocation + 0x18
+%define GdtrLocation LockLocation + 0x20
+%define IdtrLocation LockLocation + 0x2A
+%define BufferStartLocation LockLocation + 0x34
+%define Cr3OffsetLocation LockLocation + 0x38
+%define InitializeFloatingPointUnitsAddress LockLocation + 0x3C
+
+;-------------------------------------------------------------------------------------
+;RendezvousFunnelProc procedure follows. All APs execute their procedure. This
+;procedure serializes all the AP processors through an Init sequence. It must be
+;noted that APs arrive here very raw...ie: real mode, no stack.
+;ALSO THIS PROCEDURE IS EXECUTED BY APs ONLY IN 16-BIT MODE. HENCE THIS PROC
+;IS IN MACHINE CODE.
+;-------------------------------------------------------------------------------------
+;RendezvousFunnelProc (&WakeUpBuffer,MemAddress);
+
+;text SEGMENT
+DEFAULT REL
+SECTION .text
+
+BITS 16
+global ASM_PFX(RendezvousFunnelProc)
+ASM_PFX(RendezvousFunnelProc):
+RendezvousFunnelProcStart:
+
+; At this point CS = 0x(vv00) and ip= 0x0.
+
+ mov ax, cs
+ mov ds, ax
+ mov es, ax
+ mov ss, ax
+ xor ax, ax
+ mov fs, ax
+ mov gs, ax
+
+flat32Start:
+
+ mov si, BufferStartLocation
+ mov edx,dword [si] ; EDX is keeping the start address of wakeup buffer
+
+ mov si, Cr3OffsetLocation
+ mov ecx,dword [si] ; ECX is keeping the value of CR3
+
+ mov si, GdtrLocation
+o32 lgdt [cs:si]
+
+ mov si, IdtrLocation
+o32 lidt [cs:si]
+
+ xor ax, ax
+ mov ds, ax
+
+ mov eax, cr0 ; Get control register 0
+ or eax, 0x000000001 ; Set PE bit (bit #0)
+ mov cr0, eax
+
+FLAT32_JUMP:
+
+a32 jmp dword 0x20:0x0
+
+BITS 32
+PMODE_ENTRY: ; protected mode entry point
+
+ mov ax, 0x18
+o16 mov ds, ax
+o16 mov es, ax
+o16 mov fs, ax
+o16 mov gs, ax
+o16 mov ss, ax ; Flat mode setup.
+
+ mov eax, cr4
+ bts eax, 5
+ mov cr4, eax
+
+ mov cr3, ecx
+
+ mov esi, edx ; Save wakeup buffer address
+
+ mov ecx, 0xc0000080 ; EFER MSR number.
+ rdmsr ; Read EFER.
+ bts eax, 8 ; Set LME=1.
+ wrmsr ; Write EFER.
+
+ mov eax, cr0 ; Read CR0.
+ bts eax, 31 ; Set PG=1.
+ mov cr0, eax ; Write CR0.
+
+LONG_JUMP:
+
+a16 jmp dword 0x38:0x0
+
+BITS 64
+LongModeStart:
+
+ mov ax, 0x30
+o16 mov ds, ax
+o16 mov es, ax
+o16 mov ss, ax
+
+ mov edi, esi
+ add edi, LockLocation
+ mov al, NotVacantFlag
+TestLock:
+ xchg byte [edi], al
+ cmp al, NotVacantFlag
+ jz TestLock
+
+ProgramStack:
+
+ mov edi, esi
+ add edi, StackSizeLocation
+ mov rax, qword [edi]
+ mov edi, esi
+ add edi, StackStartAddressLocation
+ add rax, qword [edi]
+ mov rsp, rax
+ mov qword [edi], rax
+
+Releaselock:
+
+ mov al, VacantFlag
+ mov edi, esi
+ add edi, LockLocation
+ xchg byte [edi], al
+
+ ;
+ ; Call assembly function to initialize FPU.
+ ;
+ mov rax, qword [esi + InitializeFloatingPointUnitsAddress]
+ sub rsp, 0x20
+ call rax
+ add rsp, 0x20
+
+ ;
+ ; Call C Function
+ ;
+ mov edi, esi
+ add edi, CProcedureLocation
+ mov rax, qword [edi]
+
+ test rax, rax
+ jz GoToSleep
+
+ sub rsp, 0x20
+ call rax
+ add rsp, 0x20
+
+GoToSleep:
+ cli
+ hlt
+ jmp $-2
+
+RendezvousFunnelProcEnd:
+
+;-------------------------------------------------------------------------------------
+; AsmGetAddressMap (&AddressMap);
+;-------------------------------------------------------------------------------------
+; comments here for definition of address map
+global ASM_PFX(AsmGetAddressMap)
+ASM_PFX(AsmGetAddressMap):
+ lea rax, [RendezvousFunnelProcStart]
+ mov qword [rcx], rax
+ mov qword [rcx+0x8], PMODE_ENTRY - RendezvousFunnelProcStart
+ mov qword [rcx+0x10], FLAT32_JUMP - RendezvousFunnelProcStart
+ mov qword [rcx+0x18], RendezvousFunnelProcEnd - RendezvousFunnelProcStart
+ mov qword [rcx+0x20], LongModeStart - RendezvousFunnelProcStart
+ mov qword [rcx+0x28], LONG_JUMP - RendezvousFunnelProcStart
+ ret
+
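For reference, AsmGetAddressMap above stores six values into the caller-supplied buffer in RCX: the runtime address of RendezvousFunnelProcStart, then the offsets of PMODE_ENTRY, FLAT32_JUMP, RendezvousFunnelProcEnd (i.e. the stub size), LongModeStart and LONG_JUMP. A C-side view of that layout could look like the sketch below; the type and field names are illustrative only (the driver's header declares the real structure), while the order and widths follow the qword stores above:

  typedef struct {
    UINT8    *RendezvousFunnelAddress;   // [rcx + 0x00]  start of the real-mode stub
    UINTN    PModeEntryOffset;           // [rcx + 0x08]  PMODE_ENTRY   - start
    UINTN    FlatJumpOffset;             // [rcx + 0x10]  FLAT32_JUMP   - start
    UINTN    Size;                       // [rcx + 0x18]  end           - start
    UINTN    LModeEntryOffset;           // [rcx + 0x20]  LongModeStart - start
    UINTN    LongJumpOffset;             // [rcx + 0x28]  LONG_JUMP     - start
  } MP_ASSEMBLY_ADDRESS_MAP_SKETCH;

  VOID
  AsmGetAddressMap (
    OUT MP_ASSEMBLY_ADDRESS_MAP_SKETCH  *AddressMap
    );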
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
new file mode 100644
index 00000000..a9a995db
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
@@ -0,0 +1,1325 @@
+/** @file
+Page Fault (#PF) handler for X64 processors
+
+Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
+
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+#define PAGE_TABLE_PAGES 8
+#define ACC_MAX_BIT BIT3
+
+extern UINTN mSmmShadowStackSize;
+
+LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
+BOOLEAN m1GPageTableSupport = FALSE;
+BOOLEAN mCpuSmmRestrictedMemoryAccess;
+BOOLEAN m5LevelPagingNeeded;
+X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingNeeded;
+
+/**
+ Disable CET.
+**/
+VOID
+EFIAPI
+DisableCet (
+ VOID
+ );
+
+/**
+ Enable CET.
+**/
+VOID
+EFIAPI
+EnableCet (
+ VOID
+ );
+
+/**
+  Check whether 1-GByte pages are supported by the processor.
+
+  @retval TRUE  1-GByte pages are supported.
+  @retval FALSE 1-GByte pages are not supported.
+
+**/
+BOOLEAN
+Is1GPageSupport (
+ VOID
+ )
+{
+ UINT32 RegEax;
+ UINT32 RegEdx;
+
+ AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
+ if (RegEax >= 0x80000001) {
+ AsmCpuid (0x80000001, NULL, NULL, NULL, &RegEdx);
+ if ((RegEdx & BIT26) != 0) {
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+/**
+  The routine returns TRUE when the CPU supports 5-level paging (CPUID[7,0].ECX.BIT[16] is set)
+  and the max physical address bits is bigger than 48. Because 4-level paging can address
+  physical addresses up to 2^48 - 1, there is no need to enable 5-level paging when the
+  max physical address bits is <= 48.
+
+ @retval TRUE 5-level paging enabling is needed.
+ @retval FALSE 5-level paging enabling is not needed.
+**/
+BOOLEAN
+Is5LevelPagingNeeded (
+ VOID
+ )
+{
+ CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize;
+ CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX ExtFeatureEcx;
+ UINT32 MaxExtendedFunctionId;
+
+ AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);
+ if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {
+ AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
+ } else {
+ VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
+ }
+ AsmCpuidEx (
+ CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
+ CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
+ NULL, NULL, &ExtFeatureEcx.Uint32, NULL
+ );
+ DEBUG ((
+ DEBUG_INFO, "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
+ VirPhyAddressSize.Bits.PhysicalAddressBits, ExtFeatureEcx.Bits.FiveLevelPage
+ ));
+
+ if (VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) {
+ ASSERT (ExtFeatureEcx.Bits.FiveLevelPage == 1);
+ return TRUE;
+ } else {
+ return FALSE;
+ }
+}
+
+/**
+ Get page table base address and the depth of the page table.
+
+ @param[out] Base Page table base address.
+ @param[out] FiveLevels TRUE means 5 level paging. FALSE means 4 level paging.
+**/
+VOID
+GetPageTable (
+ OUT UINTN *Base,
+ OUT BOOLEAN *FiveLevels OPTIONAL
+ )
+{
+ IA32_CR4 Cr4;
+
+ if (mInternalCr3 == 0) {
+ *Base = AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64;
+ if (FiveLevels != NULL) {
+ Cr4.UintN = AsmReadCr4 ();
+ *FiveLevels = (BOOLEAN)(Cr4.Bits.LA57 == 1);
+ }
+ return;
+ }
+
+ *Base = mInternalCr3;
+ if (FiveLevels != NULL) {
+ *FiveLevels = m5LevelPagingNeeded;
+ }
+}
+
+/**
+ Set sub-entries number in entry.
+
+ @param[in, out] Entry Pointer to entry
+ @param[in] SubEntryNum Sub-entries number based on 0:
+ 0 means there is 1 sub-entry under this entry
+ 0x1ff means there is 512 sub-entries under this entry
+
+**/
+VOID
+SetSubEntriesNum (
+ IN OUT UINT64 *Entry,
+ IN UINT64 SubEntryNum
+ )
+{
+ //
+ // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
+ //
+ *Entry = BitFieldWrite64 (*Entry, 52, 60, SubEntryNum);
+}
+
+/**
+ Return sub-entries number in entry.
+
+ @param[in] Entry Pointer to entry
+
+ @return Sub-entries number based on 0:
+ 0 means there is 1 sub-entry under this entry
+ 0x1ff means there is 512 sub-entries under this entry
+**/
+UINT64
+GetSubEntriesNum (
+ IN UINT64 *Entry
+ )
+{
+ //
+ // Sub-entries number is saved in BIT52 to BIT60 (reserved field) in Entry
+ //
+ return BitFieldRead64 (*Entry, 52, 60);
+}
+
+/**
+ Calculate the maximum support address.
+
+ @return the maximum support address.
+**/
+UINT8
+CalculateMaximumSupportAddress (
+ VOID
+ )
+{
+ UINT32 RegEax;
+ UINT8 PhysicalAddressBits;
+ VOID *Hob;
+
+ //
+ // Get physical address bits supported.
+ //
+ Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
+ if (Hob != NULL) {
+ PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
+ } else {
+ AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
+ if (RegEax >= 0x80000008) {
+ AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
+ PhysicalAddressBits = (UINT8) RegEax;
+ } else {
+ PhysicalAddressBits = 36;
+ }
+ }
+ return PhysicalAddressBits;
+}
+
+/**
+ Set static page table.
+
+ @param[in] PageTable Address of page table.
+ @param[in] PhysicalAddressBits The maximum physical address bits supported.
+**/
+VOID
+SetStaticPageTable (
+ IN UINTN PageTable,
+ IN UINT8 PhysicalAddressBits
+ )
+{
+ UINT64 PageAddress;
+ UINTN NumberOfPml5EntriesNeeded;
+ UINTN NumberOfPml4EntriesNeeded;
+ UINTN NumberOfPdpEntriesNeeded;
+ UINTN IndexOfPml5Entries;
+ UINTN IndexOfPml4Entries;
+ UINTN IndexOfPdpEntries;
+ UINTN IndexOfPageDirectoryEntries;
+ UINT64 *PageMapLevel5Entry;
+ UINT64 *PageMapLevel4Entry;
+ UINT64 *PageMap;
+ UINT64 *PageDirectoryPointerEntry;
+ UINT64 *PageDirectory1GEntry;
+ UINT64 *PageDirectoryEntry;
+
+ //
+ // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses
+ // when 5-Level Paging is disabled.
+ //
+ ASSERT (PhysicalAddressBits <= 52);
+ if (!m5LevelPagingNeeded && PhysicalAddressBits > 48) {
+ PhysicalAddressBits = 48;
+ }
+
+ NumberOfPml5EntriesNeeded = 1;
+ if (PhysicalAddressBits > 48) {
+ NumberOfPml5EntriesNeeded = (UINTN) LShiftU64 (1, PhysicalAddressBits - 48);
+ PhysicalAddressBits = 48;
+ }
+
+ NumberOfPml4EntriesNeeded = 1;
+ if (PhysicalAddressBits > 39) {
+ NumberOfPml4EntriesNeeded = (UINTN) LShiftU64 (1, PhysicalAddressBits - 39);
+ PhysicalAddressBits = 39;
+ }
+
+ NumberOfPdpEntriesNeeded = 1;
+ ASSERT (PhysicalAddressBits > 30);
+ NumberOfPdpEntriesNeeded = (UINTN) LShiftU64 (1, PhysicalAddressBits - 30);
+
+ //
+ // By architecture only one PageMapLevel4 exists - so lets allocate storage for it.
+ //
+ PageMap = (VOID *) PageTable;
+
+ PageMapLevel4Entry = PageMap;
+ PageMapLevel5Entry = NULL;
+ if (m5LevelPagingNeeded) {
+ //
+ // By architecture only one PageMapLevel5 exists - so lets allocate storage for it.
+ //
+ PageMapLevel5Entry = PageMap;
+ }
+ PageAddress = 0;
+
+ for ( IndexOfPml5Entries = 0
+ ; IndexOfPml5Entries < NumberOfPml5EntriesNeeded
+ ; IndexOfPml5Entries++, PageMapLevel5Entry++) {
+ //
+    // Each PML5 entry points to a page of PML4 entries.
+    // So let's allocate space for them and fill them in within the IndexOfPml4Entries loop.
+ // When 5-Level Paging is disabled, below allocation happens only once.
+ //
+ if (m5LevelPagingNeeded) {
+ PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
+ if (PageMapLevel4Entry == NULL) {
+ PageMapLevel4Entry = AllocatePageTableMemory (1);
+ ASSERT(PageMapLevel4Entry != NULL);
+ ZeroMem (PageMapLevel4Entry, EFI_PAGES_TO_SIZE(1));
+
+ *PageMapLevel5Entry = (UINT64)(UINTN)PageMapLevel4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ }
+ }
+
+ for (IndexOfPml4Entries = 0; IndexOfPml4Entries < (NumberOfPml5EntriesNeeded == 1 ? NumberOfPml4EntriesNeeded : 512); IndexOfPml4Entries++, PageMapLevel4Entry++) {
+ //
+ // Each PML4 entry points to a page of Page Directory Pointer entries.
+ //
+ PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & ~mAddressEncMask & gPhyMask);
+ if (PageDirectoryPointerEntry == NULL) {
+ PageDirectoryPointerEntry = AllocatePageTableMemory (1);
+ ASSERT(PageDirectoryPointerEntry != NULL);
+ ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));
+
+ *PageMapLevel4Entry = (UINT64)(UINTN)PageDirectoryPointerEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ }
+
+ if (m1GPageTableSupport) {
+ PageDirectory1GEntry = PageDirectoryPointerEntry;
+ for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
+ if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
+ //
+ // Skip the < 4G entries
+ //
+ continue;
+ }
+ //
+ // Fill in the Page Directory entries
+ //
+ *PageDirectory1GEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
+ }
+ } else {
+ PageAddress = BASE_4GB;
+ for (IndexOfPdpEntries = 0; IndexOfPdpEntries < (NumberOfPml4EntriesNeeded == 1 ? NumberOfPdpEntriesNeeded : 512); IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
+ if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
+ //
+ // Skip the < 4G entries
+ //
+ continue;
+ }
+ //
+          // Each Page Directory Pointer entry points to a page of Page Directory entries.
+          // So allocate space for them and fill them in within the IndexOfPageDirectoryEntries loop.
+ //
+ PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & ~mAddressEncMask & gPhyMask);
+ if (PageDirectoryEntry == NULL) {
+ PageDirectoryEntry = AllocatePageTableMemory (1);
+ ASSERT(PageDirectoryEntry != NULL);
+ ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));
+
+ //
+            // Fill in the Page Directory Pointer entry
+ //
+ *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ }
+
+ for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
+ //
+ // Fill in the Page Directory entries
+ //
+ *PageDirectoryEntry = PageAddress | mAddressEncMask | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
+ }
+ }
+ }
+ }
+ }
+}
+
+/**
+ Create PageTable for SMM use.
+
+ @return The address of PML4 (to set CR3).
+
+**/
+UINT32
+SmmInitPageTable (
+ VOID
+ )
+{
+ EFI_PHYSICAL_ADDRESS Pages;
+ UINT64 *PTEntry;
+ LIST_ENTRY *FreePage;
+ UINTN Index;
+ UINTN PageFaultHandlerHookAddress;
+ IA32_IDT_GATE_DESCRIPTOR *IdtEntry;
+ EFI_STATUS Status;
+ UINT64 *Pml4Entry;
+ UINT64 *Pml5Entry;
+
+ //
+ // Initialize spin lock
+ //
+ InitializeSpinLock (mPFLock);
+
+ mCpuSmmRestrictedMemoryAccess = PcdGetBool (PcdCpuSmmRestrictedMemoryAccess);
+ m1GPageTableSupport = Is1GPageSupport ();
+ m5LevelPagingNeeded = Is5LevelPagingNeeded ();
+ mPhysicalAddressBits = CalculateMaximumSupportAddress ();
+ PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);
+ DEBUG ((DEBUG_INFO, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded));
+ DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
+ DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));
+ DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));
+ //
+ // Generate PAE page table for the first 4GB memory space
+ //
+ Pages = Gen4GPageTable (FALSE);
+
+ //
+ // Set IA32_PG_PMNT bit to mask this entry
+ //
+ PTEntry = (UINT64*)(UINTN)Pages;
+ for (Index = 0; Index < 4; Index++) {
+ PTEntry[Index] |= IA32_PG_PMNT;
+ }
+
+ //
+ // Fill Page-Table-Level4 (PML4) entry
+ //
+ Pml4Entry = (UINT64*)AllocatePageTableMemory (1);
+ ASSERT (Pml4Entry != NULL);
+ *Pml4Entry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ ZeroMem (Pml4Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml4Entry));
+
+ //
+ // Set sub-entries number
+ //
+ SetSubEntriesNum (Pml4Entry, 3);
+ PTEntry = Pml4Entry;
+
+ if (m5LevelPagingNeeded) {
+ //
+ // Fill PML5 entry
+ //
+ Pml5Entry = (UINT64*)AllocatePageTableMemory (1);
+ ASSERT (Pml5Entry != NULL);
+ *Pml5Entry = (UINTN) Pml4Entry | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ ZeroMem (Pml5Entry + 1, EFI_PAGE_SIZE - sizeof (*Pml5Entry));
+ //
+ // Set sub-entries number
+ //
+ SetSubEntriesNum (Pml5Entry, 1);
+ PTEntry = Pml5Entry;
+ }
+
+ if (mCpuSmmRestrictedMemoryAccess) {
+ //
+ // When access to non-SMRAM memory is restricted, create page table
+ // that covers all memory space.
+ //
+ SetStaticPageTable ((UINTN)PTEntry, mPhysicalAddressBits);
+ } else {
+ //
+ // Add pages to page pool
+ //
+ FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
+ ASSERT (FreePage != NULL);
+ for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
+ InsertTailList (&mPagePool, FreePage);
+ FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
+ }
+ }
+
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
+ HEAP_GUARD_NONSTOP_MODE ||
+ NULL_DETECTION_NONSTOP_MODE) {
+ //
+ // Set own Page Fault entry instead of the default one, because SMM Profile
+ // feature depends on IRET instruction to do Single Step
+ //
+ PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
+ IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
+ IdtEntry += EXCEPT_IA32_PAGE_FAULT;
+ IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;
+ IdtEntry->Bits.Reserved_0 = 0;
+ IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;
+ IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);
+ IdtEntry->Bits.OffsetUpper = (UINT32)(PageFaultHandlerHookAddress >> 32);
+ IdtEntry->Bits.Reserved_1 = 0;
+ } else {
+ //
+ // Register Smm Page Fault Handler
+ //
+ Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
+ ASSERT_EFI_ERROR (Status);
+ }
+
+ //
+ // Additional SMM IDT initialization for SMM stack guard
+ //
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ InitializeIDTSmmStackGuard ();
+ }
+
+ //
+ // Return the address of PML4/PML5 (to set CR3)
+ //
+ return (UINT32)(UINTN)PTEntry;
+}
+
+/**
+ Set access record in entry.
+
+ @param[in, out] Entry Pointer to entry
+ @param[in] Acc Access record value
+
+**/
+VOID
+SetAccNum (
+ IN OUT UINT64 *Entry,
+ IN UINT64 Acc
+ )
+{
+ //
+ // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
+ //
+ *Entry = BitFieldWrite64 (*Entry, 9, 11, Acc);
+}
+
+/**
+ Return access record in entry.
+
+ @param[in] Entry Pointer to entry
+
+ @return Access record value.
+
+**/
+UINT64
+GetAccNum (
+ IN UINT64 *Entry
+ )
+{
+ //
+ // Access record is saved in BIT9 to BIT11 (reserved field) in Entry
+ //
+ return BitFieldRead64 (*Entry, 9, 11);
+}
+
+/**
+ Return and update the access record in entry.
+
+ @param[in, out] Entry Pointer to entry
+
+ @return Access record value.
+
+**/
+UINT64
+GetAndUpdateAccNum (
+ IN OUT UINT64 *Entry
+ )
+{
+ UINT64 Acc;
+
+ Acc = GetAccNum (Entry);
+ if ((*Entry & IA32_PG_A) != 0) {
+ //
+ // If this entry has been accessed, clear access flag in Entry and update access record
+ // to the initial value 7, adding ACC_MAX_BIT is to make it larger than others
+ //
+ *Entry &= ~(UINT64)(UINTN)IA32_PG_A;
+ SetAccNum (Entry, 0x7);
+ return (0x7 + ACC_MAX_BIT);
+ } else {
+ if (Acc != 0) {
+ //
+ // If the access record is not the smallest value 0, minus 1 and update the access record field
+ //
+ SetAccNum (Entry, Acc - 1);
+ }
+ }
+ return Acc;
+}
+
+/**
+ Reclaim free pages for PageFault handler.
+
+ Search the whole entries tree to find the leaf entry that has the smallest
+ access record value. Insert the page pointed by this leaf entry into the
+ page pool. And check its upper entries if need to be inserted into the page
+ pool or not.
+
+**/
+VOID
+ReclaimPages (
+ VOID
+ )
+{
+ UINT64 Pml5Entry;
+ UINT64 *Pml5;
+ UINT64 *Pml4;
+ UINT64 *Pdpt;
+ UINT64 *Pdt;
+ UINTN Pml5Index;
+ UINTN Pml4Index;
+ UINTN PdptIndex;
+ UINTN PdtIndex;
+ UINTN MinPml5;
+ UINTN MinPml4;
+ UINTN MinPdpt;
+ UINTN MinPdt;
+ UINT64 MinAcc;
+ UINT64 Acc;
+ UINT64 SubEntriesNum;
+ BOOLEAN PML4EIgnore;
+ BOOLEAN PDPTEIgnore;
+ UINT64 *ReleasePageAddress;
+ IA32_CR4 Cr4;
+ BOOLEAN Enable5LevelPaging;
+ UINT64 PFAddress;
+ UINT64 PFAddressPml5Index;
+ UINT64 PFAddressPml4Index;
+ UINT64 PFAddressPdptIndex;
+ UINT64 PFAddressPdtIndex;
+
+ Pml4 = NULL;
+ Pdpt = NULL;
+ Pdt = NULL;
+ MinAcc = (UINT64)-1;
+ MinPml4 = (UINTN)-1;
+ MinPml5 = (UINTN)-1;
+ MinPdpt = (UINTN)-1;
+ MinPdt = (UINTN)-1;
+ Acc = 0;
+ ReleasePageAddress = 0;
+ PFAddress = AsmReadCr2 ();
+ PFAddressPml5Index = BitFieldRead64 (PFAddress, 48, 48 + 8);
+ PFAddressPml4Index = BitFieldRead64 (PFAddress, 39, 39 + 8);
+ PFAddressPdptIndex = BitFieldRead64 (PFAddress, 30, 30 + 8);
+ PFAddressPdtIndex = BitFieldRead64 (PFAddress, 21, 21 + 8);
+
+ Cr4.UintN = AsmReadCr4 ();
+ Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
+ Pml5 = (UINT64*)(UINTN)(AsmReadCr3 () & gPhyMask);
+
+ if (!Enable5LevelPaging) {
+ //
+ // Create one fake PML5 entry for 4-Level Paging
+ // so that the page table parsing logic only handles 5-Level page structure.
+ //
+ Pml5Entry = (UINTN) Pml5 | IA32_PG_P;
+ Pml5 = &Pml5Entry;
+ }
+
+ //
+ // First, find the leaf entry has the smallest access record value
+ //
+ for (Pml5Index = 0; Pml5Index < (Enable5LevelPaging ? (EFI_PAGE_SIZE / sizeof (*Pml4)) : 1); Pml5Index++) {
+ if ((Pml5[Pml5Index] & IA32_PG_P) == 0 || (Pml5[Pml5Index] & IA32_PG_PMNT) != 0) {
+ //
+ // If the PML5 entry is not present or is masked, skip it
+ //
+ continue;
+ }
+ Pml4 = (UINT64*)(UINTN)(Pml5[Pml5Index] & gPhyMask);
+ for (Pml4Index = 0; Pml4Index < EFI_PAGE_SIZE / sizeof (*Pml4); Pml4Index++) {
+ if ((Pml4[Pml4Index] & IA32_PG_P) == 0 || (Pml4[Pml4Index] & IA32_PG_PMNT) != 0) {
+ //
+ // If the PML4 entry is not present or is masked, skip it
+ //
+ continue;
+ }
+ Pdpt = (UINT64*)(UINTN)(Pml4[Pml4Index] & ~mAddressEncMask & gPhyMask);
+ PML4EIgnore = FALSE;
+ for (PdptIndex = 0; PdptIndex < EFI_PAGE_SIZE / sizeof (*Pdpt); PdptIndex++) {
+ if ((Pdpt[PdptIndex] & IA32_PG_P) == 0 || (Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
+ //
+ // If the PDPT entry is not present or is masked, skip it
+ //
+ if ((Pdpt[PdptIndex] & IA32_PG_PMNT) != 0) {
+ //
+ // If the PDPT entry is masked, we will ignore checking the PML4 entry
+ //
+ PML4EIgnore = TRUE;
+ }
+ continue;
+ }
+ if ((Pdpt[PdptIndex] & IA32_PG_PS) == 0) {
+ //
+          // It's not a 1-GByte page entry, it should be a PDPT entry,
+          // so we will not check the PML4 entry any more
+ //
+ PML4EIgnore = TRUE;
+ Pdt = (UINT64*)(UINTN)(Pdpt[PdptIndex] & ~mAddressEncMask & gPhyMask);
+ PDPTEIgnore = FALSE;
+ for (PdtIndex = 0; PdtIndex < EFI_PAGE_SIZE / sizeof(*Pdt); PdtIndex++) {
+ if ((Pdt[PdtIndex] & IA32_PG_P) == 0 || (Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
+ //
+ // If the PD entry is not present or is masked, skip it
+ //
+ if ((Pdt[PdtIndex] & IA32_PG_PMNT) != 0) {
+ //
+                // If the PD entry is masked, we will not check the PDPT entry any more
+ //
+ PDPTEIgnore = TRUE;
+ }
+ continue;
+ }
+ if ((Pdt[PdtIndex] & IA32_PG_PS) == 0) {
+ //
+              // It's not a 2-MByte page entry, it should be a PD entry;
+              // we will find the entry that has the smallest access record value
+ //
+ PDPTEIgnore = TRUE;
+ if (PdtIndex != PFAddressPdtIndex || PdptIndex != PFAddressPdptIndex ||
+ Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
+ Acc = GetAndUpdateAccNum (Pdt + PdtIndex);
+ if (Acc < MinAcc) {
+ //
+ // If the PD entry has the smallest access record value,
+ // save the Page address to be released
+ //
+ MinAcc = Acc;
+ MinPml5 = Pml5Index;
+ MinPml4 = Pml4Index;
+ MinPdpt = PdptIndex;
+ MinPdt = PdtIndex;
+ ReleasePageAddress = Pdt + PdtIndex;
+ }
+ }
+ }
+ }
+ if (!PDPTEIgnore) {
+ //
+            // If this PDPT entry has no PDT entries pointing to 4 KByte pages,
+            // it should only have entries pointing to 2 MByte pages
+ //
+ if (PdptIndex != PFAddressPdptIndex || Pml4Index != PFAddressPml4Index ||
+ Pml5Index != PFAddressPml5Index) {
+ Acc = GetAndUpdateAccNum (Pdpt + PdptIndex);
+ if (Acc < MinAcc) {
+ //
+ // If the PDPT entry has the smallest access record value,
+ // save the Page address to be released
+ //
+ MinAcc = Acc;
+ MinPml5 = Pml5Index;
+ MinPml4 = Pml4Index;
+ MinPdpt = PdptIndex;
+ MinPdt = (UINTN)-1;
+ ReleasePageAddress = Pdpt + PdptIndex;
+ }
+ }
+ }
+ }
+ }
+ if (!PML4EIgnore) {
+ //
+        // If the PML4 entry has no PDPT entries pointing to 2 MByte pages,
+        // it should only have entries pointing to 1 GByte pages
+ //
+ if (Pml4Index != PFAddressPml4Index || Pml5Index != PFAddressPml5Index) {
+ Acc = GetAndUpdateAccNum (Pml4 + Pml4Index);
+ if (Acc < MinAcc) {
+ //
+ // If the PML4 entry has the smallest access record value,
+ // save the Page address to be released
+ //
+ MinAcc = Acc;
+ MinPml5 = Pml5Index;
+ MinPml4 = Pml4Index;
+ MinPdpt = (UINTN)-1;
+ MinPdt = (UINTN)-1;
+ ReleasePageAddress = Pml4 + Pml4Index;
+ }
+ }
+ }
+ }
+ }
+ //
+ // Make sure one PML4/PDPT/PD entry is selected
+ //
+ ASSERT (MinAcc != (UINT64)-1);
+
+ //
+ // Secondly, insert the page pointed by this entry into page pool and clear this entry
+ //
+ InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(*ReleasePageAddress & ~mAddressEncMask & gPhyMask));
+ *ReleasePageAddress = 0;
+
+ //
+ // Lastly, check this entry's upper entries if need to be inserted into page pool
+ // or not
+ //
+ while (TRUE) {
+ if (MinPdt != (UINTN)-1) {
+ //
+ // If 4 KByte Page Table is released, check the PDPT entry
+ //
+ Pml4 = (UINT64 *) (UINTN) (Pml5[MinPml5] & gPhyMask);
+ Pdpt = (UINT64*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask);
+ SubEntriesNum = GetSubEntriesNum(Pdpt + MinPdpt);
+ if (SubEntriesNum == 0 &&
+ (MinPdpt != PFAddressPdptIndex || MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
+ //
+        // Release the empty Page Directory table if there is no more 4 KByte Page Table entry,
+        // and clear the PDPT entry
+ //
+ InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pdpt[MinPdpt] & ~mAddressEncMask & gPhyMask));
+ Pdpt[MinPdpt] = 0;
+ //
+ // Go on checking the PML4 table
+ //
+ MinPdt = (UINTN)-1;
+ continue;
+ }
+ //
+      // Update the sub-entries field in the PDPT entry and exit
+ //
+ SetSubEntriesNum (Pdpt + MinPdpt, (SubEntriesNum - 1) & 0x1FF);
+ break;
+ }
+ if (MinPdpt != (UINTN)-1) {
+ //
+ // One 2MB Page Table is released or Page Directory table is released, check the PML4 entry
+ //
+ SubEntriesNum = GetSubEntriesNum (Pml4 + MinPml4);
+ if (SubEntriesNum == 0 && (MinPml4 != PFAddressPml4Index || MinPml5 != PFAddressPml5Index)) {
+ //
+        // Release the empty Page Directory Pointer table if there is no more 1 GByte page entry,
+        // and clear the PML4 entry
+ //
+ InsertTailList (&mPagePool, (LIST_ENTRY*)(UINTN)(Pml4[MinPml4] & ~mAddressEncMask & gPhyMask));
+ Pml4[MinPml4] = 0;
+ MinPdpt = (UINTN)-1;
+ continue;
+ }
+ //
+      // Update the sub-entries field in the PML4 entry and exit
+ //
+ SetSubEntriesNum (Pml4 + MinPml4, (SubEntriesNum - 1) & 0x1FF);
+ break;
+ }
+ //
+    // The PML4 table has been released before, so exit
+ //
+ break;
+ }
+}
+
+/**
+ Allocate free Page for PageFault handler use.
+
+ @return Page address.
+
+**/
+UINT64
+AllocPage (
+ VOID
+ )
+{
+ UINT64 RetVal;
+
+ if (IsListEmpty (&mPagePool)) {
+ //
+ // If page pool is empty, reclaim the used pages and insert one into page pool
+ //
+ ReclaimPages ();
+ }
+
+ //
+ // Get one free page and remove it from page pool
+ //
+ RetVal = (UINT64)(UINTN)mPagePool.ForwardLink;
+ RemoveEntryList (mPagePool.ForwardLink);
+ //
+ // Clean this page and return
+ //
+ ZeroMem ((VOID*)(UINTN)RetVal, EFI_PAGE_SIZE);
+ return RetVal;
+}
+
+/**
+ Page Fault handler for SMM use.
+
+**/
+VOID
+SmiDefaultPFHandler (
+ VOID
+ )
+{
+ UINT64 *PageTable;
+ UINT64 *PageTableTop;
+ UINT64 PFAddress;
+ UINTN StartBit;
+ UINTN EndBit;
+ UINT64 PTIndex;
+ UINTN Index;
+ SMM_PAGE_SIZE_TYPE PageSize;
+ UINTN NumOfPages;
+ UINTN PageAttribute;
+ EFI_STATUS Status;
+ UINT64 *UpperEntry;
+ BOOLEAN Enable5LevelPaging;
+ IA32_CR4 Cr4;
+
+ //
+ // Set default SMM page attribute
+ //
+ PageSize = SmmPageSize2M;
+ NumOfPages = 1;
+ PageAttribute = 0;
+
+ EndBit = 0;
+ PageTableTop = (UINT64*)(AsmReadCr3 () & gPhyMask);
+ PFAddress = AsmReadCr2 ();
+
+ Cr4.UintN = AsmReadCr4 ();
+ Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 != 0);
+
+ Status = GetPlatformPageTableAttribute (PFAddress, &PageSize, &NumOfPages, &PageAttribute);
+ //
+  // If the platform does not support page table attributes, set the default SMM page attribute
+ //
+ if (Status != EFI_SUCCESS) {
+ PageSize = SmmPageSize2M;
+ NumOfPages = 1;
+ PageAttribute = 0;
+ }
+ if (PageSize >= MaxSmmPageSizeType) {
+ PageSize = SmmPageSize2M;
+ }
+ if (NumOfPages > 512) {
+ NumOfPages = 512;
+ }
+
+ switch (PageSize) {
+ case SmmPageSize4K:
+ //
+ // BIT12 to BIT20 is Page Table index
+ //
+ EndBit = 12;
+ break;
+ case SmmPageSize2M:
+ //
+ // BIT21 to BIT29 is Page Directory index
+ //
+ EndBit = 21;
+ PageAttribute |= (UINTN)IA32_PG_PS;
+ break;
+ case SmmPageSize1G:
+ if (!m1GPageTableSupport) {
+ DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
+ ASSERT (FALSE);
+ }
+ //
+ // BIT30 to BIT38 is Page Directory Pointer Table index
+ //
+ EndBit = 30;
+ PageAttribute |= (UINTN)IA32_PG_PS;
+ break;
+ default:
+ ASSERT (FALSE);
+ }
+
+ //
+ // If execute-disable is enabled, set NX bit
+ //
+ if (mXdEnabled) {
+ PageAttribute |= IA32_PG_NX;
+ }
+
+ for (Index = 0; Index < NumOfPages; Index++) {
+ PageTable = PageTableTop;
+ UpperEntry = NULL;
+ for (StartBit = Enable5LevelPaging ? 48 : 39; StartBit > EndBit; StartBit -= 9) {
+ PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
+ if ((PageTable[PTIndex] & IA32_PG_P) == 0) {
+ //
+ // If the entry is not present, allocate one page from page pool for it
+ //
+ PageTable[PTIndex] = AllocPage () | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ } else {
+ //
+ // Save the upper entry address
+ //
+ UpperEntry = PageTable + PTIndex;
+ }
+ //
+ // BIT9 to BIT11 of entry is used to save access record,
+ // initialize value is 7
+ //
+ PageTable[PTIndex] |= (UINT64)IA32_PG_A;
+ SetAccNum (PageTable + PTIndex, 7);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & gPhyMask);
+ }
+
+ PTIndex = BitFieldRead64 (PFAddress, StartBit, StartBit + 8);
+ if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
+ //
+      // Check if the entry already exists; this issue may occur when page entries of
+      // different sizes are created under the same entry
+ //
+ DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
+ DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
+ ASSERT (FALSE);
+ }
+ //
+ // Fill the new entry
+ //
+ PageTable[PTIndex] = ((PFAddress | mAddressEncMask) & gPhyMask & ~((1ull << EndBit) - 1)) |
+ PageAttribute | IA32_PG_A | PAGE_ATTRIBUTE_BITS;
+ if (UpperEntry != NULL) {
+ SetSubEntriesNum (UpperEntry, (GetSubEntriesNum (UpperEntry) + 1) & 0x1FF);
+ }
+ //
+ // Get the next page address if we need to create more page tables
+ //
+ PFAddress += (1ull << EndBit);
+ }
+}
+
+/**
+  The Page Fault handler wrapper for SMM use.
+
+  @param InterruptType Defines the type of interrupt or exception that
+                       occurred on the processor. This parameter is processor architecture specific.
+ @param SystemContext A pointer to the processor context when
+ the interrupt occurred on the processor.
+**/
+VOID
+EFIAPI
+SmiPFHandler (
+ IN EFI_EXCEPTION_TYPE InterruptType,
+ IN EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ UINTN PFAddress;
+ UINTN GuardPageAddress;
+ UINTN ShadowStackGuardPageAddress;
+ UINTN CpuIndex;
+
+ ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);
+
+ AcquireSpinLock (mPFLock);
+
+ PFAddress = AsmReadCr2 ();
+
+ if (mCpuSmmRestrictedMemoryAccess && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
+ DumpCpuContext (InterruptType, SystemContext);
+ DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
+ CpuDeadLoop ();
+ goto Exit;
+ }
+
+ //
+ // If a page fault occurs in SMRAM range, it might be in a SMM stack/shadow stack guard page,
+ // or SMM page protection violation.
+ //
+ if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
+ (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
+ DumpCpuContext (InterruptType, SystemContext);
+ CpuIndex = GetCpuIndex ();
+ GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
+ ShadowStackGuardPageAddress = (mSmmStackArrayBase + mSmmStackSize + EFI_PAGE_SIZE + CpuIndex * (mSmmStackSize + mSmmShadowStackSize));
+ if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
+ (PFAddress >= GuardPageAddress) &&
+ (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
+ DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
+ } else if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
+ (mSmmShadowStackSize > 0) &&
+ (PFAddress >= ShadowStackGuardPageAddress) &&
+ (PFAddress < (ShadowStackGuardPageAddress + EFI_PAGE_SIZE))) {
+ DEBUG ((DEBUG_ERROR, "SMM shadow stack overflow!\n"));
+ } else {
+ if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
+ DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%lx)\n", PFAddress));
+ DEBUG_CODE (
+ DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
+ );
+ } else {
+ DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%lx)\n", PFAddress));
+ DEBUG_CODE (
+ DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
+ );
+ }
+
+ if (HEAP_GUARD_NONSTOP_MODE) {
+ GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
+ goto Exit;
+ }
+ }
+ CpuDeadLoop ();
+ goto Exit;
+ }
+
+ //
+ // If a page fault occurs in non-SMRAM range.
+ //
+ if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
+ (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
+ if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
+ DumpCpuContext (InterruptType, SystemContext);
+ DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
+ DEBUG_CODE (
+ DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
+ );
+ CpuDeadLoop ();
+ goto Exit;
+ }
+
+ //
+ // If NULL pointer was just accessed
+ //
+ if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
+ (PFAddress < EFI_PAGE_SIZE)) {
+ DumpCpuContext (InterruptType, SystemContext);
+ DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
+ DEBUG_CODE (
+ DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
+ );
+
+ if (NULL_DETECTION_NONSTOP_MODE) {
+ GuardPagePFHandler (SystemContext.SystemContextX64->ExceptionData);
+ goto Exit;
+ }
+
+ CpuDeadLoop ();
+ goto Exit;
+ }
+
+ if (mCpuSmmRestrictedMemoryAccess && IsSmmCommBufferForbiddenAddress (PFAddress)) {
+ DumpCpuContext (InterruptType, SystemContext);
+ DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%lx)!\n", PFAddress));
+ DEBUG_CODE (
+ DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextX64->Rip);
+ );
+ CpuDeadLoop ();
+ goto Exit;
+ }
+ }
+
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ SmmProfilePFHandler (
+ SystemContext.SystemContextX64->Rip,
+ SystemContext.SystemContextX64->ExceptionData
+ );
+ } else {
+ SmiDefaultPFHandler ();
+ }
+
+Exit:
+ ReleaseSpinLock (mPFLock);
+}
+
+/**
+ This function sets memory attribute for page table.
+**/
+VOID
+SetPageTableAttributes (
+ VOID
+ )
+{
+ UINTN Index2;
+ UINTN Index3;
+ UINTN Index4;
+ UINTN Index5;
+ UINT64 *L1PageTable;
+ UINT64 *L2PageTable;
+ UINT64 *L3PageTable;
+ UINT64 *L4PageTable;
+ UINT64 *L5PageTable;
+ UINTN PageTableBase;
+ BOOLEAN IsSplitted;
+ BOOLEAN PageTableSplitted;
+ BOOLEAN CetEnabled;
+ BOOLEAN Enable5LevelPaging;
+
+ //
+ // Don't mark page table memory as read-only if
+ // - no restriction on access to non-SMRAM memory; or
+ // - SMM heap guard feature enabled; or
+ // BIT2: SMM page guard enabled
+ // BIT3: SMM pool guard enabled
+ // - SMM profile feature enabled
+ //
+ if (!mCpuSmmRestrictedMemoryAccess ||
+ ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) ||
+ FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ //
+ // Restriction on access to non-SMRAM memory and heap guard could not be enabled at the same time.
+ //
+ ASSERT (!(mCpuSmmRestrictedMemoryAccess &&
+ (PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0));
+
+ //
+ // Restriction on access to non-SMRAM memory and SMM profile could not be enabled at the same time.
+ //
+ ASSERT (!(mCpuSmmRestrictedMemoryAccess && FeaturePcdGet (PcdCpuSmmProfileEnable)));
+ return ;
+ }
+
+ DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));
+
+ //
+  // Disable write protection, because we need to mark the page table memory as read-only.
+  // We need to *write* the page table memory in order to mark it *read-only*.
+ //
+ CetEnabled = ((AsmReadCr4() & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
+ if (CetEnabled) {
+ //
+ // CET must be disabled if WP is disabled.
+ //
+ DisableCet();
+ }
+ AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);
+
+ do {
+ DEBUG ((DEBUG_INFO, "Start...\n"));
+ PageTableSplitted = FALSE;
+ L5PageTable = NULL;
+
+ GetPageTable (&PageTableBase, &Enable5LevelPaging);
+
+ if (Enable5LevelPaging) {
+ L5PageTable = (UINT64 *)PageTableBase;
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)PageTableBase, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+ }
+
+ for (Index5 = 0; Index5 < (Enable5LevelPaging ? SIZE_4KB/sizeof(UINT64) : 1); Index5++) {
+ if (Enable5LevelPaging) {
+ L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
+ if (L4PageTable == NULL) {
+ continue;
+ }
+ } else {
+ L4PageTable = (UINT64 *)PageTableBase;
+ }
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+
+ for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
+ L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
+ if (L3PageTable == NULL) {
+ continue;
+ }
+
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+
+ for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
+ if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
+ // 1G
+ continue;
+ }
+ L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
+ if (L2PageTable == NULL) {
+ continue;
+ }
+
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+
+ for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
+ if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
+ // 2M
+ continue;
+ }
+ L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
+ if (L1PageTable == NULL) {
+ continue;
+ }
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+ }
+ }
+ }
+ }
+ } while (PageTableSplitted);
+
+ //
+ // Enable write protection, after page table updated.
+ //
+ AsmWriteCr0 (AsmReadCr0() | CR0_WP);
+ if (CetEnabled) {
+ //
+ // re-enable CET.
+ //
+ EnableCet();
+ }
+
+ return ;
+}
+
+/**
+ This function reads CR2 register when on-demand paging is enabled.
+
+ @param[out] *Cr2 Pointer to variable to hold CR2 register value.
+**/
+VOID
+SaveCr2 (
+ OUT UINTN *Cr2
+ )
+{
+ if (!mCpuSmmRestrictedMemoryAccess) {
+ //
+ // On-demand paging is enabled when access to non-SMRAM is not restricted.
+ //
+ *Cr2 = AsmReadCr2 ();
+ }
+}
+
+/**
+ This function restores CR2 register when on-demand paging is enabled.
+
+ @param[in] Cr2 Value to write into CR2 register.
+**/
+VOID
+RestoreCr2 (
+ IN UINTN Cr2
+ )
+{
+ if (!mCpuSmmRestrictedMemoryAccess) {
+ //
+ // On-demand paging is enabled when access to non-SMRAM is not restricted.
+ //
+ AsmWriteCr2 (Cr2);
+ }
+}
+
+/**
+ Return whether access to non-SMRAM is restricted.
+
+ @retval TRUE Access to non-SMRAM is restricted.
+ @retval FALSE Access to non-SMRAM is not restricted.
+**/
+BOOLEAN
+IsRestrictedMemoryAccess (
+ VOID
+ )
+{
+ return mCpuSmmRestrictedMemoryAccess;
+}
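A compact reference for the software-defined page-table-entry bits that the on-demand paging code in PageTbl.c above relies on. This is a sketch derived from SetAccNum()/GetAccNum(), SetSubEntriesNum()/GetSubEntriesNum() and ReclaimPages(); BitFieldRead64() is the BaseLib helper already used in this file, and Entry stands for any non-leaf entry value:

  //
  // Layout of a non-leaf SMM page-table entry as used by the on-demand paging code:
  //   bits 52..60  number of present sub-entries       (SetSubEntriesNum / GetSubEntriesNum)
  //   bits 12..51  page-frame address, extracted with  "& ~mAddressEncMask & gPhyMask"
  //   bits  9..11  access record used by ReclaimPages() to pick the eviction victim
  //   bit   6      IA32_PG_A, sampled and cleared by GetAndUpdateAccNum()
  //   IA32_PG_PMNT marks entries that ReclaimPages() must never reclaim
  //   (set on the first four PDPT entries in SmmInitPageTable() above)
  //
  UINT64  AccessRecord = BitFieldRead64 (Entry, 9, 11);    // equivalent to GetAccNum (&Entry)
  UINT64  SubEntries   = BitFieldRead64 (Entry, 52, 60);   // equivalent to GetSubEntriesNum (&Entry)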
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Semaphore.c b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Semaphore.c
new file mode 100644
index 00000000..867e5b31
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Semaphore.c
@@ -0,0 +1,69 @@
+/** @file
+Semaphore mechanism to indicate to the BSP that an AP has exited SMM
+after SMBASE relocation.
+
+Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+X86_ASSEMBLY_PATCH_LABEL gPatchSmmRelocationOriginalAddressPtr32;
+X86_ASSEMBLY_PATCH_LABEL gPatchRebasedFlagAddr32;
+
+UINTN mSmmRelocationOriginalAddress;
+volatile BOOLEAN *mRebasedFlag;
+
+/**
+AP Semaphore operation in 32-bit mode while BSP runs in 64-bit mode.
+**/
+VOID
+SmmRelocationSemaphoreComplete32 (
+ VOID
+ );
+
+/**
+ Hook return address of SMM Save State so that semaphore code
+ can be executed immediately after AP exits SMM to indicate to
+ the BSP that an AP has exited SMM after SMBASE relocation.
+
+ @param[in] CpuIndex The processor index.
+ @param[in] RebasedFlag A pointer to a flag that is set to TRUE
+ immediately after AP exits SMM.
+
+**/
+VOID
+SemaphoreHook (
+ IN UINTN CpuIndex,
+ IN volatile BOOLEAN *RebasedFlag
+ )
+{
+ SMRAM_SAVE_STATE_MAP *CpuState;
+ UINTN TempValue;
+
+ mRebasedFlag = RebasedFlag;
+ PatchInstructionX86 (
+ gPatchRebasedFlagAddr32,
+ (UINT32)(UINTN)mRebasedFlag,
+ 4
+ );
+
+ CpuState = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);
+ mSmmRelocationOriginalAddress = HookReturnFromSmm (
+ CpuIndex,
+ CpuState,
+ (UINT64)(UINTN)&SmmRelocationSemaphoreComplete32,
+ (UINT64)(UINTN)&SmmRelocationSemaphoreComplete
+ );
+
+ //
+ // Use temp value to fix ICC compiler warning
+ //
+ TempValue = (UINTN)&mSmmRelocationOriginalAddress;
+ PatchInstructionX86 (
+ gPatchSmmRelocationOriginalAddressPtr32,
+ (UINT32)TempValue,
+ 4
+ );
+}
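The two PatchInstructionX86() calls above follow the patch-label convention used throughout this driver: the assembly side emits an instruction whose immediate is a placeholder and places an X86_ASSEMBLY_PATCH_LABEL right after it, and the C side then writes the real value into the bytes immediately preceding the label. gPatchRebasedFlagAddr32 and gPatchSmmRelocationOriginalAddressPtr32 live on the assembly side in SmmInit.nasm (not shown in this excerpt); SmiEntry.nasm below uses the same pattern, e.g. gPatchSmiStack. A hedged sketch of the pairing (NewSmiStackTop is an illustrative value, not the driver's actual expression):

  //
  // Assembly side (see gPatchSmiStack in SmiEntry.nasm below):
  //       mov     esp, strict dword 0        ; source operand will be patched
  //   ASM_PFX(gPatchSmiStack):
  //
  // C side: patch the 4-byte immediate that sits just before the label.
  //
  PatchInstructionX86 (gPatchSmiStack, NewSmiStackTop, 4);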
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
new file mode 100644
index 00000000..334cc6c8
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
@@ -0,0 +1,396 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2016 - 2019, Intel Corporation. All rights reserved.<BR>
+; Copyright (c) 2020, AMD Incorporated. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+; Module Name:
+;
+; SmiEntry.nasm
+;
+; Abstract:
+;
+; Code template of the SMI handler for a particular processor
+;
+;-------------------------------------------------------------------------------
+
+%include "StuffRsbNasm.inc"
+%include "Nasm.inc"
+
+;
+; Variables referenced by C code
+;
+
+%define MSR_IA32_S_CET 0x6A2
+%define MSR_IA32_CET_SH_STK_EN 0x1
+%define MSR_IA32_CET_WR_SHSTK_EN 0x2
+%define MSR_IA32_CET_ENDBR_EN 0x4
+%define MSR_IA32_CET_LEG_IW_EN 0x8
+%define MSR_IA32_CET_NO_TRACK_EN 0x10
+%define MSR_IA32_CET_SUPPRESS_DIS 0x20
+%define MSR_IA32_CET_SUPPRESS 0x400
+%define MSR_IA32_CET_TRACKER 0x800
+%define MSR_IA32_PL0_SSP 0x6A4
+%define MSR_IA32_INTERRUPT_SSP_TABLE_ADDR 0x6A8
+
+%define CR4_CET 0x800000
+
+%define MSR_IA32_MISC_ENABLE 0x1A0
+%define MSR_EFER 0xc0000080
+%define MSR_EFER_XD 0x800
+
+;
+; Constants relating to PROCESSOR_SMM_DESCRIPTOR
+;
+%define DSC_OFFSET 0xfb00
+%define DSC_GDTPTR 0x30
+%define DSC_GDTSIZ 0x38
+%define DSC_CS 14
+%define DSC_DS 16
+%define DSC_SS 18
+%define DSC_OTHERSEG 20
+;
+; Constants relating to CPU State Save Area
+;
+%define SSM_DR6 0xffd0
+%define SSM_DR7 0xffc8
+
+%define PROTECT_MODE_CS 0x8
+%define PROTECT_MODE_DS 0x20
+%define LONG_MODE_CS 0x38
+%define TSS_SEGMENT 0x40
+%define GDT_SIZE 0x50
+
+extern ASM_PFX(SmiRendezvous)
+extern ASM_PFX(gSmiHandlerIdtr)
+extern ASM_PFX(CpuSmmDebugEntry)
+extern ASM_PFX(CpuSmmDebugExit)
+
+global ASM_PFX(gPatchSmbase)
+extern ASM_PFX(mXdSupported)
+global ASM_PFX(gPatchXdSupported)
+global ASM_PFX(gPatchMsrIa32MiscEnableSupported)
+global ASM_PFX(gPatchSmiStack)
+global ASM_PFX(gPatchSmiCr3)
+global ASM_PFX(gPatch5LevelPagingNeeded)
+global ASM_PFX(gcSmiHandlerTemplate)
+global ASM_PFX(gcSmiHandlerSize)
+
+extern ASM_PFX(mCetSupported)
+global ASM_PFX(mPatchCetSupported)
+global ASM_PFX(mPatchCetPl0Ssp)
+global ASM_PFX(mPatchCetInterruptSsp)
+global ASM_PFX(mPatchCetInterruptSspTable)
+
+ DEFAULT REL
+ SECTION .text
+
+BITS 16
+ASM_PFX(gcSmiHandlerTemplate):
+_SmiEntryPoint:
+ mov bx, _GdtDesc - _SmiEntryPoint + 0x8000
+ mov ax,[cs:DSC_OFFSET + DSC_GDTSIZ]
+ dec ax
+ mov [cs:bx], ax
+ mov eax, [cs:DSC_OFFSET + DSC_GDTPTR]
+ mov [cs:bx + 2], eax
+o32 lgdt [cs:bx] ; lgdt fword ptr cs:[bx]
+ mov ax, PROTECT_MODE_CS
+ mov [cs:bx-0x2],ax
+ mov edi, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmbase):
+ lea eax, [edi + (@ProtectedMode - _SmiEntryPoint) + 0x8000]
+ mov [cs:bx-0x6],eax
+ mov ebx, cr0
+ and ebx, 0x9ffafff3
+ or ebx, 0x23
+ mov cr0, ebx
+ jmp dword 0x0:0x0
+_GdtDesc:
+ DW 0
+ DD 0
+
+BITS 32
+@ProtectedMode:
+ mov ax, PROTECT_MODE_DS
+o16 mov ds, ax
+o16 mov es, ax
+o16 mov fs, ax
+o16 mov gs, ax
+o16 mov ss, ax
+ mov esp, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmiStack):
+ jmp ProtFlatMode
+
+BITS 64
+ProtFlatMode:
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmiCr3):
+ mov cr3, rax
+ mov eax, 0x668 ; as cr4.PGE is not set here, refresh cr3
+
+ mov cl, strict byte 0 ; source operand will be patched
+ASM_PFX(gPatch5LevelPagingNeeded):
+ cmp cl, 0
+ je SkipEnable5LevelPaging
+ ;
+ ; Enable 5-Level Paging bit
+ ;
+ bts eax, 12 ; Set LA57 bit (bit #12)
+SkipEnable5LevelPaging:
+
+ mov cr4, rax ; in PreModifyMtrrs() to flush TLB.
+; Load TSS
+ sub esp, 8 ; reserve room in stack
+ sgdt [rsp]
+ mov eax, [rsp + 2] ; eax = GDT base
+ add esp, 8
+ mov dl, 0x89
+ mov [rax + TSS_SEGMENT + 5], dl ; clear busy flag
+ mov eax, TSS_SEGMENT
+ ltr ax
+
+; enable NXE if supported
+ mov al, strict byte 1 ; source operand may be patched
+ASM_PFX(gPatchXdSupported):
+ cmp al, 0
+ jz @SkipXd
+
+; If MSR_IA32_MISC_ENABLE is supported, clear XD Disable bit
+ mov al, strict byte 1 ; source operand may be patched
+ASM_PFX(gPatchMsrIa32MiscEnableSupported):
+ cmp al, 1
+ jz MsrIa32MiscEnableSupported
+
+; MSR_IA32_MISC_ENABLE not supported
+ sub esp, 4
+ xor rdx, rdx
+ push rdx ; don't try to restore the XD Disable bit just before RSM
+ jmp EnableNxe
+
+;
+; Check XD disable bit
+;
+MsrIa32MiscEnableSupported:
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ sub esp, 4
+ push rdx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz EnableNxe
+ and dx, 0xFFFB ; clear XD Disable bit if it is set
+ wrmsr
+EnableNxe:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+ jmp @XdDone
+@SkipXd:
+ sub esp, 8
+@XdDone:
+
+; Switch into @LongMode
+    push    LONG_MODE_CS               ; push cs hardcoded here
+ call Base ; push return address for retf later
+Base:
+ add dword [rsp], @LongMode - Base; offset for far retf, seg is the 1st arg
+
+ mov ecx, MSR_EFER
+ rdmsr
+ or ah, 1 ; enable LME
+ wrmsr
+ mov rbx, cr0
+ or ebx, 0x80010023 ; enable paging + WP + NE + MP + PE
+ mov cr0, rbx
+ retf
+@LongMode: ; long mode (64-bit code) starts here
+ mov rax, strict qword 0 ; mov rax, ASM_PFX(gSmiHandlerIdtr)
+SmiHandlerIdtrAbsAddr:
+ lidt [rax]
+ lea ebx, [rdi + DSC_OFFSET]
+ mov ax, [rbx + DSC_DS]
+ mov ds, eax
+ mov ax, [rbx + DSC_OTHERSEG]
+ mov es, eax
+ mov fs, eax
+ mov gs, eax
+ mov ax, [rbx + DSC_SS]
+ mov ss, eax
+
+ mov rbx, [rsp + 0x8] ; rbx <- CpuIndex
+
+; enable CET if supported
+ mov al, strict byte 1 ; source operand may be patched
+ASM_PFX(mPatchCetSupported):
+ cmp al, 0
+ jz CetDone
+
+ mov ecx, MSR_IA32_S_CET
+ rdmsr
+ push rdx
+ push rax
+
+ mov ecx, MSR_IA32_PL0_SSP
+ rdmsr
+ push rdx
+ push rax
+
+ mov ecx, MSR_IA32_INTERRUPT_SSP_TABLE_ADDR
+ rdmsr
+ push rdx
+ push rax
+
+ mov ecx, MSR_IA32_S_CET
+ mov eax, MSR_IA32_CET_SH_STK_EN
+ xor edx, edx
+ wrmsr
+
+ mov ecx, MSR_IA32_PL0_SSP
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(mPatchCetPl0Ssp):
+ xor edx, edx
+ wrmsr
+ mov rcx, cr0
+ btr ecx, 16 ; clear WP
+ mov cr0, rcx
+ mov [eax], eax ; reload SSP, and clear busyflag.
+ xor ecx, ecx
+ mov [eax + 4], ecx
+
+ mov ecx, MSR_IA32_INTERRUPT_SSP_TABLE_ADDR
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(mPatchCetInterruptSspTable):
+ xor edx, edx
+ wrmsr
+
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(mPatchCetInterruptSsp):
+ cmp eax, 0
+ jz CetInterruptDone
+ mov [eax], eax ; reload SSP, and clear busyflag.
+ xor ecx, ecx
+ mov [eax + 4], ecx
+CetInterruptDone:
+
+ mov rcx, cr0
+ bts ecx, 16 ; set WP
+ mov cr0, rcx
+
+ mov eax, 0x668 | CR4_CET
+ mov cr4, rax
+
+ SETSSBSY
+
+CetDone:
+
+ ;
+ ; Save FP registers
+ ;
+ sub rsp, 0x200
+ fxsave64 [rsp]
+
+ add rsp, -0x20
+
+ mov rcx, rbx
+ mov rax, strict qword 0 ; call ASM_PFX(CpuSmmDebugEntry)
+CpuSmmDebugEntryAbsAddr:
+ call rax
+
+ mov rcx, rbx
+ mov rax, strict qword 0 ; call ASM_PFX(SmiRendezvous)
+SmiRendezvousAbsAddr:
+ call rax
+
+ mov rcx, rbx
+ mov rax, strict qword 0 ; call ASM_PFX(CpuSmmDebugExit)
+CpuSmmDebugExitAbsAddr:
+ call rax
+
+ add rsp, 0x20
+
+ ;
+ ; Restore FP registers
+ ;
+ fxrstor64 [rsp]
+
+ add rsp, 0x200
+
+ mov rax, strict qword 0 ; mov rax, ASM_PFX(mCetSupported)
+mCetSupportedAbsAddr:
+ mov al, [rax]
+ cmp al, 0
+ jz CetDone2
+
+ mov eax, 0x668
+ mov cr4, rax ; disable CET
+
+ mov ecx, MSR_IA32_INTERRUPT_SSP_TABLE_ADDR
+ pop rax
+ pop rdx
+ wrmsr
+
+ mov ecx, MSR_IA32_PL0_SSP
+ pop rax
+ pop rdx
+ wrmsr
+
+ mov ecx, MSR_IA32_S_CET
+ pop rax
+ pop rdx
+ wrmsr
+CetDone2:
+
+ mov rax, strict qword 0 ; lea rax, [ASM_PFX(mXdSupported)]
+mXdSupportedAbsAddr:
+ mov al, [rax]
+ cmp al, 0
+ jz .1
+ pop rdx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz .1
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
+ wrmsr
+
+.1:
+
+ StuffRsb64
+ rsm
+
+ASM_PFX(gcSmiHandlerSize) DW $ - _SmiEntryPoint
+
+;
+; Retrieve the address and fill it into mov opcode.
+;
+; It is called in the driver entry point first.
+; It is used to fix up the real address in mov opcode.
+; Then, after the code logic is copied to the different location,
+; the code can also run.
+;
+global ASM_PFX(PiSmmCpuSmiEntryFixupAddress)
+ASM_PFX(PiSmmCpuSmiEntryFixupAddress):
+ lea rax, [ASM_PFX(gSmiHandlerIdtr)]
+ lea rcx, [SmiHandlerIdtrAbsAddr]
+ mov qword [rcx - 8], rax
+
+ lea rax, [ASM_PFX(CpuSmmDebugEntry)]
+ lea rcx, [CpuSmmDebugEntryAbsAddr]
+ mov qword [rcx - 8], rax
+
+ lea rax, [ASM_PFX(SmiRendezvous)]
+ lea rcx, [SmiRendezvousAbsAddr]
+ mov qword [rcx - 8], rax
+
+ lea rax, [ASM_PFX(CpuSmmDebugExit)]
+ lea rcx, [CpuSmmDebugExitAbsAddr]
+ mov qword [rcx - 8], rax
+
+ lea rax, [ASM_PFX(mXdSupported)]
+ lea rcx, [mXdSupportedAbsAddr]
+ mov qword [rcx - 8], rax
+
+ lea rax, [ASM_PFX(mCetSupported)]
+ lea rcx, [mCetSupportedAbsAddr]
+ mov qword [rcx - 8], rax
+ ret
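Why PiSmmCpuSmiEntryFixupAddress() above is needed: gcSmiHandlerTemplate is not executed in place; the driver copies it to each processor's SMI entry point at SMBASE + 0x8000 (the template's own offsets, e.g. "_GdtDesc - _SmiEntryPoint + 0x8000", assume this), so RIP-relative references would resolve inside the copy rather than to the real symbols. The "mov rax, strict qword 0" placeholders are therefore patched once with absolute addresses before the copy is made. A hedged sketch of the consuming side (the real install code lives elsewhere in the driver; SMM_HANDLER_OFFSET = 0x8000 and the per-CPU Smbase variable are assumptions here):

  extern CONST UINT8   gcSmiHandlerTemplate[];
  extern CONST UINT16  gcSmiHandlerSize;

  //
  // Patch the absolute-address placeholders once, then stamp a copy of the
  // template at each CPU's SMI entry point.
  //
  PiSmmCpuSmiEntryFixupAddress ();
  CopyMem (
    (VOID *)(UINTN)(Smbase + SMM_HANDLER_OFFSET),
    (VOID *)gcSmiHandlerTemplate,
    gcSmiHandlerSize
    );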
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm
new file mode 100644
index 00000000..ec497190
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm
@@ -0,0 +1,378 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+; Module Name:
+;
+; SmiException.nasm
+;
+; Abstract:
+;
+; Exception handlers used in SM mode
+;
+;-------------------------------------------------------------------------------
+
+extern ASM_PFX(SmiPFHandler)
+
+global ASM_PFX(gcSmiIdtr)
+global ASM_PFX(gcSmiGdtr)
+global ASM_PFX(gcPsd)
+
+ SECTION .data
+
+NullSeg: DQ 0 ; reserved by architecture
+CodeSeg32:
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x9b
+ DB 0xcf ; LimitHigh
+ DB 0 ; BaseHigh
+ProtModeCodeSeg32:
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x9b
+ DB 0xcf ; LimitHigh
+ DB 0 ; BaseHigh
+ProtModeSsSeg32:
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x93
+ DB 0xcf ; LimitHigh
+ DB 0 ; BaseHigh
+DataSeg32:
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x93
+ DB 0xcf ; LimitHigh
+ DB 0 ; BaseHigh
+CodeSeg16:
+ DW -1
+ DW 0
+ DB 0
+ DB 0x9b
+ DB 0x8f
+ DB 0
+DataSeg16:
+ DW -1
+ DW 0
+ DB 0
+ DB 0x93
+ DB 0x8f
+ DB 0
+CodeSeg64:
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x9b
+ DB 0xaf ; LimitHigh
+ DB 0 ; BaseHigh
+; TSS Segment for X64 specially
+TssSeg:
+ DW TSS_DESC_SIZE ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x89
+ DB 0x80 ; LimitHigh
+ DB 0 ; BaseHigh
+ DD 0 ; BaseUpper
+ DD 0 ; Reserved
+GDT_SIZE equ $ - NullSeg
+
+; Create TSS Descriptor just after GDT
+TssDescriptor:
+ DD 0 ; Reserved
+ DQ 0 ; RSP0
+ DQ 0 ; RSP1
+ DQ 0 ; RSP2
+ DD 0 ; Reserved
+ DD 0 ; Reserved
+ DQ 0 ; IST1
+ DQ 0 ; IST2
+ DQ 0 ; IST3
+ DQ 0 ; IST4
+ DQ 0 ; IST5
+ DQ 0 ; IST6
+ DQ 0 ; IST7
+ DD 0 ; Reserved
+ DD 0 ; Reserved
+ DW 0 ; Reserved
+ DW 0 ; I/O Map Base Address
+TSS_DESC_SIZE equ $ - TssDescriptor
+
+;
+; This structure serves as a template for all processors.
+;
+ASM_PFX(gcPsd):
+ DB 'PSDSIG '
+ DW PSD_SIZE
+ DW 2
+ DW 1 << 2
+ DW CODE_SEL
+ DW DATA_SEL
+ DW DATA_SEL
+ DW DATA_SEL
+ DW 0
+ DQ 0
+ DQ 0
+ DQ 0 ; fixed in InitializeMpServiceData()
+ DQ NullSeg
+ DD GDT_SIZE
+ DD 0
+ times 24 DB 0
+ DQ 0
+PSD_SIZE equ $ - ASM_PFX(gcPsd)
+
+;
+; CODE & DATA segments for SMM runtime
+;
+CODE_SEL equ CodeSeg64 - NullSeg
+DATA_SEL equ DataSeg32 - NullSeg
+CODE32_SEL equ CodeSeg32 - NullSeg
+
+ASM_PFX(gcSmiGdtr):
+ DW GDT_SIZE - 1
+ DQ NullSeg
+
+ASM_PFX(gcSmiIdtr):
+ DW 0
+ DQ 0
+
+ DEFAULT REL
+ SECTION .text
+
+;------------------------------------------------------------------------------
+; PageFaultIdtHandlerSmmProfile is the page fault exception entry point,
+; followed by the common exception handling code.
+;
+; The stack frame is as follows, as specified in the IA32 manuals:
+;
+; +---------------------+ <-- 16-byte aligned ensured by processor
+; + Old SS +
+; +---------------------+
+; + Old RSP +
+; +---------------------+
+; + RFlags +
+; +---------------------+
+; + CS +
+; +---------------------+
+; + RIP +
+; +---------------------+
+; + Error Code +
+; +---------------------+
+; + Vector Number +
+; +---------------------+
+; + RBP +
+; +---------------------+ <-- RBP, 16-byte aligned
+;
+; RSP left at an odd multiple of 8 after the vector number is pushed means ErrCode PRESENT
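+; (The processor aligns RSP to 16 bytes before pushing the frame, so a frame
+; with an error code is 6 pushes (48 bytes) and leaves RSP 16-byte aligned,
+; while a frame without one is 5 pushes (40 bytes) and leaves RSP at xxx8h.
+; After the vector push below, bit 3 of SPL therefore distinguishes the two
+; cases, which is what the "test spl, 8" check relies on.)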
+;------------------------------------------------------------------------------
+global ASM_PFX(PageFaultIdtHandlerSmmProfile)
+ASM_PFX(PageFaultIdtHandlerSmmProfile):
+ push 0xe ; Page Fault
+ test spl, 8 ; odd multiple of 8 => ErrCode present
+ jnz .0
+ push qword [rsp] ; duplicate INT# if no ErrCode
+ mov qword [rsp + 8], 0
+.0:
+ push rbp
+ mov rbp, rsp
+
+ ;
+ ; Since here the stack pointer is 16-byte aligned, so
+ ; EFI_FX_SAVE_STATE_X64 of EFI_SYSTEM_CONTEXT_x64
+ ; is 16-byte aligned
+ ;
+
+;; UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
+;; UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
+ push r15
+ push r14
+ push r13
+ push r12
+ push r11
+ push r10
+ push r9
+ push r8
+ push rax
+ push rcx
+ push rdx
+ push rbx
+ push qword [rbp + 48] ; RSP
+ push qword [rbp] ; RBP
+ push rsi
+ push rdi
+
+;; UINT64 Gs, Fs, Es, Ds, Cs, Ss; ensure the bits above the 16-bit selector are zero
+ movzx rax, word [rbp + 56]
+ push rax ; for ss
+ movzx rax, word [rbp + 32]
+ push rax ; for cs
+ mov rax, ds
+ push rax
+ mov rax, es
+ push rax
+ mov rax, fs
+ push rax
+ mov rax, gs
+ push rax
+
+;; UINT64 Rip;
+ push qword [rbp + 24]
+
+;; UINT64 Gdtr[2], Idtr[2];
+ sub rsp, 16
+ sidt [rsp]
+ sub rsp, 16
+ sgdt [rsp]
+
+;; UINT64 Ldtr, Tr;
+ xor rax, rax
+ str ax
+ push rax
+ sldt ax
+ push rax
+
+;; UINT64 RFlags;
+ push qword [rbp + 40]
+
+;; UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
+ mov rax, cr8
+ push rax
+ mov rax, cr4
+ or rax, 0x208
+ mov cr4, rax
+ push rax
+ mov rax, cr3
+ push rax
+ mov rax, cr2
+ push rax
+ xor rax, rax
+ push rax
+ mov rax, cr0
+ push rax
+
+;; UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+ mov rax, dr7
+ push rax
+ mov rax, dr6
+ push rax
+ mov rax, dr3
+ push rax
+ mov rax, dr2
+ push rax
+ mov rax, dr1
+ push rax
+ mov rax, dr0
+ push rax
+
+;; FX_SAVE_STATE_X64 FxSaveState;
+
+ sub rsp, 512
+ mov rdi, rsp
+ fxsave [rdi]
+
+; The UEFI calling convention for x64 requires that the Direction flag in EFLAGS is clear
+ cld
+
+;; UINT32 ExceptionData;
+ push qword [rbp + 16]
+
+;; call into exception handler
+ mov rcx, [rbp + 8]
+ lea rax, [ASM_PFX(SmiPFHandler)]
+
+;; Prepare parameter and call
+ mov rdx, rsp
+ ;
+ ; Per X64 calling convention, allocate maximum parameter stack space
+ ; and make sure RSP is 16-byte aligned
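+ ; (4 * 8 bytes is the register home/"shadow" space the Microsoft x64 calling
+ ; convention requires the caller to reserve for RCX/RDX/R8/R9; the extra
+ ; 8 bytes keeps RSP 16-byte aligned at the CALL given the pushes above.)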
+ ;
+ sub rsp, 4 * 8 + 8
+ call rax
+ add rsp, 4 * 8 + 8
+ jmp .1
+
+.1:
+;; UINT64 ExceptionData;
+ add rsp, 8
+
+;; FX_SAVE_STATE_X64 FxSaveState;
+
+ mov rsi, rsp
+ fxrstor [rsi]
+ add rsp, 512
+
+;; UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+;; Skip restoration of DRx registers to support debuggers
+;; that set breakpoints in interrupt/exception context
+ add rsp, 8 * 6
+
+;; UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
+ pop rax
+ mov cr0, rax
+ add rsp, 8 ; not for Cr1
+ pop rax
+ mov cr2, rax
+ pop rax
+ mov cr3, rax
+ pop rax
+ mov cr4, rax
+ pop rax
+ mov cr8, rax
+
+;; UINT64 RFlags;
+ pop qword [rbp + 40]
+
+;; UINT64 Ldtr, Tr;
+;; UINT64 Gdtr[2], Idtr[2];
+;; Best not let anyone mess with these particular registers...
+ add rsp, 48
+
+;; UINT64 Rip;
+ pop qword [rbp + 24]
+
+;; UINT64 Gs, Fs, Es, Ds, Cs, Ss;
+ pop rax
+ ; mov gs, rax ; not for gs
+ pop rax
+ ; mov fs, rax ; not for fs
+ ; (X64 does not use fs and gs here, so we do not restore them)
+ pop rax
+ mov es, rax
+ pop rax
+ mov ds, rax
+ pop qword [rbp + 32] ; for cs
+ pop qword [rbp + 56] ; for ss
+
+;; UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
+;; UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
+ pop rdi
+ pop rsi
+ add rsp, 8 ; not for rbp
+ pop qword [rbp + 48] ; for rsp
+ pop rbx
+ pop rdx
+ pop rcx
+ pop rax
+ pop r8
+ pop r9
+ pop r10
+ pop r11
+ pop r12
+ pop r13
+ pop r14
+ pop r15
+
+ mov rsp, rbp
+
+; Enable TF bit after page fault handler runs
+ bts dword [rsp + 40], 8 ; RFLAGS
+
+ pop rbp
+ add rsp, 16 ; skip INT# & ErrCode
+ iretq
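+
+; Note: the TF bit set above causes a single-step (#DB) trap as soon as the
+; faulting instruction is re-executed; the SMM profile debug handler (outside
+; this file) is then expected to restore the original page table entries
+; recorded in mLastPFEntryValue/mLastPFEntryPointer and to clear TF again via
+; ClearTrapFlag() in SmmProfileArch.c.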
+
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c
new file mode 100644
index 00000000..1f30c6d4
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c
@@ -0,0 +1,218 @@
+/** @file
+ SMM CPU misc functions for x64 arch specific.
+
+Copyright (c) 2015 - 2019, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+EFI_PHYSICAL_ADDRESS mGdtBuffer;
+UINTN mGdtBufferSize;
+
+extern BOOLEAN mCetSupported;
+extern UINTN mSmmShadowStackSize;
+
+X86_ASSEMBLY_PATCH_LABEL mPatchCetPl0Ssp;
+X86_ASSEMBLY_PATCH_LABEL mPatchCetInterruptSsp;
+X86_ASSEMBLY_PATCH_LABEL mPatchCetInterruptSspTable;
+UINT32 mCetPl0Ssp;
+UINT32 mCetInterruptSsp;
+UINT32 mCetInterruptSspTable;
+
+UINTN mSmmInterruptSspTables;
+
+/**
+ Initialize IDT for SMM Stack Guard.
+
+**/
+VOID
+EFIAPI
+InitializeIDTSmmStackGuard (
+ VOID
+ )
+{
+ IA32_IDT_GATE_DESCRIPTOR *IdtGate;
+
+ //
+ // If SMM Stack Guard feature is enabled, set the IST field of
+ // the interrupt gate for Page Fault Exception to be 1
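+ // (In the X64 IA32_IDT_GATE_DESCRIPTOR layout, the low bits of the byte the
+ // common structure names Reserved_0 hold the IST index, which is why the
+ // assignment below writes Reserved_0 = 1 to select IST1.)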
+ //
+ IdtGate = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
+ IdtGate += EXCEPT_IA32_PAGE_FAULT;
+ IdtGate->Bits.Reserved_0 = 1;
+}
+
+/**
+ Initialize Gdt for all processors.
+
+ @param[in] Cr3 CR3 value.
+ @param[out] GdtStepSize The step size for GDT table.
+
+ @return GdtBase for processor 0.
+ GdtBase for processor X is: GdtBase + (GdtStepSize * X)
+**/
+VOID *
+InitGdt (
+ IN UINTN Cr3,
+ OUT UINTN *GdtStepSize
+ )
+{
+ UINTN Index;
+ IA32_SEGMENT_DESCRIPTOR *GdtDescriptor;
+ UINTN TssBase;
+ UINTN GdtTssTableSize;
+ UINT8 *GdtTssTables;
+ UINTN GdtTableStepSize;
+
+ //
+ // For X64 SMM, we allocate a separate GDT/TSS for each CPU to avoid TSS load contention
+ // on each SMI entry.
+ //
+ GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE + 7) & ~7; // 8 bytes aligned
+ mGdtBufferSize = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
+ GdtTssTables = (UINT8*)AllocateCodePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
+ ASSERT (GdtTssTables != NULL);
+ mGdtBuffer = (UINTN)GdtTssTables;
+ GdtTableStepSize = GdtTssTableSize;
+
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
+ CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE);
+
+ //
+ // Fixup TSS descriptors
+ //
+ TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);
+ GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;
+ GdtDescriptor->Bits.BaseLow = (UINT16)(UINTN)TssBase;
+ GdtDescriptor->Bits.BaseMid = (UINT8)((UINTN)TssBase >> 16);
+ GdtDescriptor->Bits.BaseHigh = (UINT8)((UINTN)TssBase >> 24);
+
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ //
+ // Setup top of known good stack as IST1 for each processor.
+ //
+ *(UINTN *)(TssBase + TSS_X64_IST1_OFFSET) = (mSmmStackArrayBase + EFI_PAGE_SIZE + Index * (mSmmStackSize + mSmmShadowStackSize));
+ }
+ }
+
+ *GdtStepSize = GdtTableStepSize;
+ return GdtTssTables;
+}
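+
+//
+// Illustrative use of the contract documented above (names local to this sketch):
+//
+//   UINTN Step;
+//   UINT8 *GdtBase0 = (UINT8 *)InitGdt (Cr3, &Step);
+//   // The GDT for CPU Index is at GdtBase0 + Step * Index; its TSS immediately
+//   // follows that copy at offset gcSmiGdtr.Limit + 1.
+//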
+
+/**
+ Get the protected mode code segment from the current GDT table.
+
+ @return Protected mode code segment value.
+**/
+UINT16
+GetProtectedModeCS (
+ VOID
+ )
+{
+ IA32_DESCRIPTOR GdtrDesc;
+ IA32_SEGMENT_DESCRIPTOR *GdtEntry;
+ UINTN GdtEntryCount;
+ UINT16 Index;
+
+ AsmReadGdtr (&GdtrDesc);
+ GdtEntryCount = (GdtrDesc.Limit + 1) / sizeof (IA32_SEGMENT_DESCRIPTOR);
+ GdtEntry = (IA32_SEGMENT_DESCRIPTOR *) GdtrDesc.Base;
+ for (Index = 0; Index < GdtEntryCount; Index++) {
+ if (GdtEntry->Bits.L == 0) {
+ if (GdtEntry->Bits.Type > 8 && GdtEntry->Bits.DB == 1) {
+ break;
+ }
+ }
+ GdtEntry++;
+ }
+ ASSERT (Index != GdtEntryCount);
+ return Index * 8;
+}
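+
+//
+// Note: the loop above returns the selector (Index * 8) of the first 32-bit
+// (L == 0, DB == 1) code segment (Type > 8) found in the current GDT;
+// TransferApToSafeState() below passes it to AsmDisablePaging64() so the AP
+// can drop out of long mode into protected mode.
+//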
+
+/**
+ Transfer the AP to a safe hlt-loop after it finishes restoring CPU features on the S3 path.
+
+ @param[in] ApHltLoopCode The address of the safe hlt-loop function.
+ @param[in] TopOfStack A pointer to the new stack to use for the ApHltLoopCode.
+ @param[in] NumberToFinishAddress Address of the semaphore counting the APs that have finished.
+
+**/
+VOID
+TransferApToSafeState (
+ IN UINTN ApHltLoopCode,
+ IN UINTN TopOfStack,
+ IN UINTN NumberToFinishAddress
+ )
+{
+ AsmDisablePaging64 (
+ GetProtectedModeCS (),
+ (UINT32)ApHltLoopCode,
+ (UINT32)NumberToFinishAddress,
+ 0,
+ (UINT32)TopOfStack
+ );
+ //
+ // It should never reach here
+ //
+ ASSERT (FALSE);
+}
+
+/**
+ Initialize the shadow stack related data structure.
+
+ @param CpuIndex The index of the CPU.
+ @param ShadowStack The bottom of the shadow stack for this CPU.
+**/
+VOID
+InitShadowStack (
+ IN UINTN CpuIndex,
+ IN VOID *ShadowStack
+ )
+{
+ UINTN SmmShadowStackSize;
+ UINT64 *InterruptSspTable;
+ UINT32 InterruptSsp;
+
+ if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
+ SmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ SmmShadowStackSize += EFI_PAGES_TO_SIZE (2);
+ }
+ mCetPl0Ssp = (UINT32)((UINTN)ShadowStack + SmmShadowStackSize - sizeof(UINT64));
+ PatchInstructionX86 (mPatchCetPl0Ssp, mCetPl0Ssp, 4);
+ DEBUG ((DEBUG_INFO, "mCetPl0Ssp - 0x%x\n", mCetPl0Ssp));
+ DEBUG ((DEBUG_INFO, "ShadowStack - 0x%x\n", ShadowStack));
+ DEBUG ((DEBUG_INFO, " SmmShadowStackSize - 0x%x\n", SmmShadowStackSize));
+
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ if (mSmmInterruptSspTables == 0) {
+ mSmmInterruptSspTables = (UINTN)AllocateZeroPool(sizeof(UINT64) * 8 * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
+ ASSERT (mSmmInterruptSspTables != 0);
+ DEBUG ((DEBUG_INFO, "mSmmInterruptSspTables - 0x%x\n", mSmmInterruptSspTables));
+ }
+
+ //
+ // The highest address on the stack (0xFF8) is a save-previous-ssp token pointing to a location that is 40 bytes away - 0xFD0.
+ // The supervisor shadow stack token is just above it at address 0xFF0. This is where the interrupt SSP table points.
+ // So when an interrupt or exception occurs, we can use SAVEPREVSSP/RSTORSSP/CLRSSBSY for the supervisor shadow stack,
+ // because the RETF in the SMM exception handler cannot clear the BUSY flag while staying at the same CPL.
+ // (Only IRET, or RETF to a different CPL, can clear the BUSY flag.)
+ // Please refer to UefiCpuPkg/Library/CpuExceptionHandlerLib/X64 for the full stack frame at runtime.
+ //
+ InterruptSsp = (UINT32)((UINTN)ShadowStack + EFI_PAGES_TO_SIZE(1) - sizeof(UINT64));
+ *(UINT32 *)(UINTN)InterruptSsp = (InterruptSsp - sizeof(UINT64) * 4) | 0x2;
+ mCetInterruptSsp = InterruptSsp - sizeof(UINT64);
+
+ mCetInterruptSspTable = (UINT32)(UINTN)(mSmmInterruptSspTables + sizeof(UINT64) * 8 * CpuIndex);
+ InterruptSspTable = (UINT64 *)(UINTN)mCetInterruptSspTable;
+ InterruptSspTable[1] = mCetInterruptSsp;
+ PatchInstructionX86 (mPatchCetInterruptSsp, mCetInterruptSsp, 4);
+ PatchInstructionX86 (mPatchCetInterruptSspTable, mCetInterruptSspTable, 4);
+ DEBUG ((DEBUG_INFO, "mCetInterruptSsp - 0x%x\n", mCetInterruptSsp));
+ DEBUG ((DEBUG_INFO, "mCetInterruptSspTable - 0x%x\n", mCetInterruptSspTable));
+ }
+ }
+}
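+
+//
+// Note on the code above: mCetPl0Ssp is patched to the top of this CPU's shadow
+// stack, and when stack guard is enabled a per-CPU interrupt SSP table is
+// allocated whose entry [1] (used for IST1, the same IST the page fault gate is
+// given in InitializeIDTSmmStackGuard) points at the supervisor shadow stack
+// token prepared in the first page of the shadow stack.
+//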
+
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm
new file mode 100644
index 00000000..1c5648ee
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm
@@ -0,0 +1,146 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+; Module Name:
+;
+; SmmInit.nasm
+;
+; Abstract:
+;
+; Functions for relocating SMBASE's for all processors
+;
+;-------------------------------------------------------------------------------
+
+%include "StuffRsbNasm.inc"
+
+extern ASM_PFX(SmmInitHandler)
+extern ASM_PFX(mRebasedFlag)
+extern ASM_PFX(mSmmRelocationOriginalAddress)
+
+global ASM_PFX(gPatchSmmCr3)
+global ASM_PFX(gPatchSmmCr4)
+global ASM_PFX(gPatchSmmCr0)
+global ASM_PFX(gPatchSmmInitStack)
+global ASM_PFX(gcSmiInitGdtr)
+global ASM_PFX(gcSmmInitSize)
+global ASM_PFX(gcSmmInitTemplate)
+global ASM_PFX(gPatchRebasedFlagAddr32)
+global ASM_PFX(gPatchSmmRelocationOriginalAddressPtr32)
+
+%define LONG_MODE_CS 0x38
+
+ DEFAULT REL
+ SECTION .text
+
+ASM_PFX(gcSmiInitGdtr):
+ DW 0
+ DQ 0
+
+global ASM_PFX(SmmStartup)
+
+BITS 16
+ASM_PFX(SmmStartup):
+ mov eax, 0x80000001 ; read capability
+ cpuid
+ mov ebx, edx ; rdmsr will change edx. keep it in ebx.
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmmCr3):
+ mov cr3, eax
+o32 lgdt [cs:ebp + (ASM_PFX(gcSmiInitGdtr) - ASM_PFX(SmmStartup))]
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmmCr4):
+ or ah, 2 ; enable XMM registers access
+ mov cr4, eax
+ mov ecx, 0xc0000080 ; IA32_EFER MSR
+ rdmsr
+ or ah, BIT0 ; set LME bit
+ test ebx, BIT20 ; check NXE capability
+ jz .1
+ or ah, BIT3 ; set NXE bit
+.1:
+ wrmsr
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmmCr0):
+ mov cr0, eax ; enable protected mode & paging
+ jmp LONG_MODE_CS : dword 0 ; offset will be patched to @LongMode
+@PatchLongModeOffset:
+
+BITS 64
+@LongMode: ; long-mode starts here
+ mov rsp, strict qword 0 ; source operand will be patched
+ASM_PFX(gPatchSmmInitStack):
+ and sp, 0xfff0 ; make sure RSP is 16-byte aligned
+ ;
+ ; According to the X64 calling convention, XMM0~5 are volatile; we need to save
+ ; them before calling the C function.
+ ;
+ sub rsp, 0x60
+ movdqa [rsp], xmm0
+ movdqa [rsp + 0x10], xmm1
+ movdqa [rsp + 0x20], xmm2
+ movdqa [rsp + 0x30], xmm3
+ movdqa [rsp + 0x40], xmm4
+ movdqa [rsp + 0x50], xmm5
+
+ add rsp, -0x20
+ call ASM_PFX(SmmInitHandler)
+ add rsp, 0x20
+
+ ;
+ ; Restore XMM0~5 after calling the C function.
+ ;
+ movdqa xmm0, [rsp]
+ movdqa xmm1, [rsp + 0x10]
+ movdqa xmm2, [rsp + 0x20]
+ movdqa xmm3, [rsp + 0x30]
+ movdqa xmm4, [rsp + 0x40]
+ movdqa xmm5, [rsp + 0x50]
+
+ StuffRsb64
+ rsm
+
+BITS 16
+ASM_PFX(gcSmmInitTemplate):
+ mov ebp, [cs:@L1 - ASM_PFX(gcSmmInitTemplate) + 0x8000]
+ sub ebp, 0x30000
+ jmp ebp
+@L1:
+ DQ 0; ASM_PFX(SmmStartup)
+
+ASM_PFX(gcSmmInitSize): DW $ - ASM_PFX(gcSmmInitTemplate)
+
+BITS 64
+global ASM_PFX(SmmRelocationSemaphoreComplete)
+ASM_PFX(SmmRelocationSemaphoreComplete):
+ push rax
+ mov rax, [ASM_PFX(mRebasedFlag)]
+ mov byte [rax], 1
+ pop rax
+ jmp [ASM_PFX(mSmmRelocationOriginalAddress)]
+
+;
+; Semaphore code running in 32-bit mode
+;
+BITS 32
+global ASM_PFX(SmmRelocationSemaphoreComplete32)
+ASM_PFX(SmmRelocationSemaphoreComplete32):
+ push eax
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchRebasedFlagAddr32):
+ mov byte [eax], 1
+ pop eax
+ jmp dword [dword 0] ; destination will be patched
+ASM_PFX(gPatchSmmRelocationOriginalAddressPtr32):
+
+BITS 64
+global ASM_PFX(PiSmmCpuSmmInitFixupAddress)
+ASM_PFX(PiSmmCpuSmmInitFixupAddress):
+ lea rax, [@LongMode]
+ lea rcx, [@PatchLongModeOffset - 6]
+ mov dword [rcx], eax
+
+ lea rax, [ASM_PFX(SmmStartup)]
+ lea rcx, [@L1]
+ mov qword [rcx], rax
+ ret
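+
+; For reference: the first fixup writes the 32-bit offset of @LongMode into the
+; far "jmp LONG_MODE_CS : dword 0" above (the 4-byte offset field begins 6 bytes
+; before @PatchLongModeOffset, ahead of the 2-byte selector), and the second
+; stores the 64-bit address of SmmStartup at @L1, which the relocated copy of
+; gcSmmInitTemplate reads (as a 32-bit value) to jump to SmmStartup.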
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
new file mode 100644
index 00000000..4e5f9d30
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
@@ -0,0 +1,333 @@
+/** @file
+X64 processor specific functions to enable SMM profile.
+
+Copyright (c) 2012 - 2019, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
+
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+#include "SmmProfileInternal.h"
+
+//
+// Current page index.
+//
+UINTN mPFPageIndex;
+
+//
+// Pool for dynamically creating page table in page fault handler.
+//
+UINT64 mPFPageBuffer;
+
+//
+// Store the uplink information for each page being used.
+//
+UINT64 *mPFPageUplink[MAX_PF_PAGE_COUNT];
+
+/**
+ Create SMM page table for S3 path.
+
+**/
+VOID
+InitSmmS3Cr3 (
+ VOID
+ )
+{
+ EFI_PHYSICAL_ADDRESS Pages;
+ UINT64 *PTEntry;
+
+ //
+ // Generate PAE page table for the first 4GB memory space
+ //
+ Pages = Gen4GPageTable (FALSE);
+
+ //
+ // Fill Page-Table-Level4 (PML4) entry
+ //
+ PTEntry = (UINT64*)AllocatePageTableMemory (1);
+ ASSERT (PTEntry != NULL);
+ *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));
+
+ //
+ // Return the address of PML4 (to set CR3)
+ //
+ mSmmS3ResumeState->SmmS3Cr3 = (UINT32)(UINTN)PTEntry;
+
+ return ;
+}
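+
+//
+// Note: the single PML4 entry written above covers the low 512 GB and points at
+// the 4 GB page tables returned by Gen4GPageTable(), so loading SmmS3Cr3 into
+// CR3 on the S3 resume path gives that path a usable page table.
+//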
+
+/**
+ Allocate pages used to create 4KB page tables when a 2MB page must be split in the page fault handler.
+
+**/
+VOID
+InitPagesForPFHandler (
+ VOID
+ )
+{
+ VOID *Address;
+
+ //
+ // Pre-Allocate memory for page fault handler
+ //
+ Address = NULL;
+ Address = AllocatePages (MAX_PF_PAGE_COUNT);
+ ASSERT (Address != NULL);
+
+ mPFPageBuffer = (UINT64)(UINTN) Address;
+ mPFPageIndex = 0;
+ ZeroMem ((VOID *) (UINTN) mPFPageBuffer, EFI_PAGE_SIZE * MAX_PF_PAGE_COUNT);
+ ZeroMem (mPFPageUplink, sizeof (mPFPageUplink));
+
+ return;
+}
+
+/**
+ Allocate one page used to split a 2MB page into 4KB pages.
+
+ @param Uplink The address of the Page-Directory entry.
+
+**/
+VOID
+AcquirePage (
+ UINT64 *Uplink
+ )
+{
+ UINT64 Address;
+
+ //
+ // Get the buffer
+ //
+ Address = mPFPageBuffer + EFI_PAGES_TO_SIZE (mPFPageIndex);
+ ZeroMem ((VOID *) (UINTN) Address, EFI_PAGE_SIZE);
+
+ //
+ // Cut the previous uplink if it exists and wasn't overwritten
+ //
+ if ((mPFPageUplink[mPFPageIndex] != NULL) && ((*mPFPageUplink[mPFPageIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK) == Address)) {
+ *mPFPageUplink[mPFPageIndex] = 0;
+ }
+
+ //
+ // Link & Record the current uplink
+ //
+ *Uplink = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ mPFPageUplink[mPFPageIndex] = Uplink;
+
+ mPFPageIndex = (mPFPageIndex + 1) % MAX_PF_PAGE_COUNT;
+}
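+
+//
+// Note: the MAX_PF_PAGE_COUNT pages are handed out round-robin via mPFPageIndex;
+// before a page is reused, its previously recorded parent entry (uplink) is
+// cleared, so the stale 4KB mapping it used to back is dropped rather than left
+// pointing at a recycled page.
+//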
+
+/**
+ Update the page table to map the memory correctly so that the instruction
+ which caused the page fault can execute successfully. It also saves the original
+ page table entry so it can be restored in the single-step exception handler.
+
+ @param PageTable PageTable Address.
+ @param PFAddress The memory address which caused page fault exception.
+ @param CpuIndex The index of the processor.
+ @param ErrorCode The Error code of exception.
+ @param IsValidPFAddress The flag that indicates whether SMM profile data needs to be added.
+
+**/
+VOID
+RestorePageTableAbove4G (
+ UINT64 *PageTable,
+ UINT64 PFAddress,
+ UINTN CpuIndex,
+ UINTN ErrorCode,
+ BOOLEAN *IsValidPFAddress
+ )
+{
+ UINTN PTIndex;
+ UINT64 Address;
+ BOOLEAN Nx;
+ BOOLEAN Existed;
+ UINTN Index;
+ UINTN PFIndex;
+ IA32_CR4 Cr4;
+ BOOLEAN Enable5LevelPaging;
+
+ ASSERT ((PageTable != NULL) && (IsValidPFAddress != NULL));
+
+ Cr4.UintN = AsmReadCr4 ();
+ Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
+
+ //
+ // If the page fault address is above 4GB.
+ //
+
+ //
+ // Check if the page fault address already exists in the page table.
+ // If it exists in the page table but a page fault is still generated,
+ // there are 2 possible reasons: 1. the present flag is cleared; 2. an instruction fetch in a protected memory range.
+ //
+ Existed = FALSE;
+ PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
+ PTIndex = 0;
+ if (Enable5LevelPaging) {
+ PTIndex = BitFieldRead64 (PFAddress, 48, 56);
+ }
+ if ((!Enable5LevelPaging) || ((PageTable[PTIndex] & IA32_PG_P) != 0)) {
+ // PML5E
+ if (Enable5LevelPaging) {
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ }
+ PTIndex = BitFieldRead64 (PFAddress, 39, 47);
+ if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
+ // PML4E
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ PTIndex = BitFieldRead64 (PFAddress, 30, 38);
+ if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
+ // PDPTE
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ PTIndex = BitFieldRead64 (PFAddress, 21, 29);
+ // PD
+ if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
+ //
+ // 2MB page
+ //
+ Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ if ((Address & ~((1ull << 21) - 1)) == ((PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 21) - 1)))) {
+ Existed = TRUE;
+ }
+ } else {
+ //
+ // 4KB page
+ //
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ if (PageTable != 0) {
+ //
+ // When there is a valid entry mapping a 4KB page, there is no need to create a new entry to map 2MB.
+ //
+ PTIndex = BitFieldRead64 (PFAddress, 12, 20);
+ Address = (UINT64)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ if ((Address & ~((1ull << 12) - 1)) == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
+ Existed = TRUE;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ //
+ // If the page entry does not exist in the page table at all, create a new entry.
+ //
+ if (!Existed) {
+
+ if (IsAddressValid (PFAddress, &Nx)) {
+ //
+ // If a page fault address above 4GB is in the protected range but still causes a page fault exception,
+ // create a page entry for this address and mark it as present/rw and execution-disabled.
+ // This access is not saved into the SMM profile data.
+ //
+ *IsValidPFAddress = TRUE;
+ }
+
+ //
+ // Create one entry in page table for page fault address.
+ //
+ SmiDefaultPFHandler ();
+ //
+ // Find the page table entry created just now.
+ //
+ PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
+ PFAddress = AsmReadCr2 ();
+ // PML5E
+ if (Enable5LevelPaging) {
+ PTIndex = BitFieldRead64 (PFAddress, 48, 56);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ }
+ // PML4E
+ PTIndex = BitFieldRead64 (PFAddress, 39, 47);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ // PDPTE
+ PTIndex = BitFieldRead64 (PFAddress, 30, 38);
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ // PD
+ PTIndex = BitFieldRead64 (PFAddress, 21, 29);
+ Address = PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK;
+ //
+ // Check if the 2MB page entry needs to be split into 4KB page entries.
+ //
+ if (IsAddressSplit (Address)) {
+ AcquirePage (&PageTable[PTIndex]);
+
+ // PTE
+ PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+ for (Index = 0; Index < 512; Index++) {
+ PageTable[Index] = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+ if (!IsAddressValid (Address, &Nx)) {
+ PageTable[Index] = PageTable[Index] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
+ }
+ if (Nx && mXdSupported) {
+ PageTable[Index] = PageTable[Index] | IA32_PG_NX;
+ }
+ if (Address == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
+ PTIndex = Index;
+ }
+ Address += SIZE_4KB;
+ } // end for PT
+ } else {
+ //
+ // Update 2MB page entry.
+ //
+ if (!IsAddressValid (Address, &Nx)) {
+ //
+ // Patch the entry to remove the present and rw flags.
+ //
+ PageTable[PTIndex] = PageTable[PTIndex] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
+ }
+ //
+ // Set XD bit to 1
+ //
+ if (Nx && mXdSupported) {
+ PageTable[PTIndex] = PageTable[PTIndex] | IA32_PG_NX;
+ }
+ }
+ }
+
+ //
+ // Record old entries with non-present status.
+ // Old entries include the memory the instruction is located in and the memory the instruction accesses.
+ //
+ ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
+ if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
+ PFIndex = mPFEntryCount[CpuIndex];
+ mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex];
+ mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
+ mPFEntryCount[CpuIndex]++;
+ }
+
+ //
+ // Add the present flag or clear the XD flag so that the faulting access can complete.
+ //
+ PageTable[PTIndex] |= (UINT64)(PAGE_ATTRIBUTE_BITS);
+ if ((ErrorCode & IA32_PF_EC_ID) != 0) {
+ //
+ // If page fault is caused by instruction fetch, clear XD bit in the entry.
+ //
+ PageTable[PTIndex] &= ~IA32_PG_NX;
+ }
+
+ return;
+}
+
+/**
+ Clear TF in FLAGS.
+
+ @param SystemContext A pointer to the processor context when
+ the interrupt occurred on the processor.
+
+**/
+VOID
+ClearTrapFlag (
+ IN OUT EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ SystemContext.SystemContextX64->Rflags &= (UINTN) ~BIT8;
+}
diff --git a/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.h b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.h
new file mode 100644
index 00000000..36a1c287
--- /dev/null
+++ b/src/VBox/Devices/EFI/Firmware/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.h
@@ -0,0 +1,99 @@
+/** @file
+X64 processor specific header file to enable SMM profile.
+
+Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef _SMM_PROFILE_ARCH_H_
+#define _SMM_PROFILE_ARCH_H_
+
+#pragma pack (1)
+
+typedef struct _MSR_DS_AREA_STRUCT {
+ UINT64 BTSBufferBase;
+ UINT64 BTSIndex;
+ UINT64 BTSAbsoluteMaximum;
+ UINT64 BTSInterruptThreshold;
+ UINT64 PEBSBufferBase;
+ UINT64 PEBSIndex;
+ UINT64 PEBSAbsoluteMaximum;
+ UINT64 PEBSInterruptThreshold;
+ UINT64 PEBSCounterReset[2];
+ UINT64 Reserved;
+} MSR_DS_AREA_STRUCT;
+
+typedef struct _BRANCH_TRACE_RECORD {
+ UINT64 LastBranchFrom;
+ UINT64 LastBranchTo;
+ UINT64 Rsvd0 : 4;
+ UINT64 BranchPredicted : 1;
+ UINT64 Rsvd1 : 59;
+} BRANCH_TRACE_RECORD;
+
+typedef struct _PEBS_RECORD {
+ UINT64 Rflags;
+ UINT64 LinearIP;
+ UINT64 Rax;
+ UINT64 Rbx;
+ UINT64 Rcx;
+ UINT64 Rdx;
+ UINT64 Rsi;
+ UINT64 Rdi;
+ UINT64 Rbp;
+ UINT64 Rsp;
+ UINT64 R8;
+ UINT64 R9;
+ UINT64 R10;
+ UINT64 R11;
+ UINT64 R12;
+ UINT64 R13;
+ UINT64 R14;
+ UINT64 R15;
+} PEBS_RECORD;
+
+#pragma pack ()
+
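+//
+// Mask of bits 12..51 of a page table entry: (1 << 52) - SIZE_4KB keeps the
+// physical page frame address and, when ANDed with an entry, strips the low
+// 12 attribute bits and the upper NX/software-available bits.
+//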
+#define PHYSICAL_ADDRESS_MASK ((1ull << 52) - SIZE_4KB)
+
+/**
+ Update the page table to map the memory correctly so that the instruction
+ which caused the page fault can execute successfully. It also saves the original
+ page table entry so it can be restored in the single-step exception handler.
+
+ @param PageTable PageTable Address.
+ @param PFAddress The memory address which caused page fault exception.
+ @param CpuIndex The index of the processor.
+ @param ErrorCode The Error code of exception.
+ @param IsValidPFAddress The flag that indicates whether SMM profile data needs to be added.
+
+**/
+VOID
+RestorePageTableAbove4G (
+ UINT64 *PageTable,
+ UINT64 PFAddress,
+ UINTN CpuIndex,
+ UINTN ErrorCode,
+ BOOLEAN *IsValidPFAddress
+ );
+
+/**
+ Create SMM page table for S3 path.
+
+**/
+VOID
+InitSmmS3Cr3 (
+ VOID
+ );
+
+/**
+ Allocate pages used to create 4KB page tables when a 2MB page must be split in the page fault handler.
+
+**/
+VOID
+InitPagesForPFHandler (
+ VOID
+ );
+
+#endif // _SMM_PROFILE_ARCH_H_