* [edk2-devel] [PATCH v1 13/13] UefiCpuPkg/PiSmmCpuDxeSmm: Remove SmBases relocation logic
2024-04-10 13:57 [edk2-devel] [PATCH v1 00/13] Add SmmRelocationLib Wu, Jiaxin
@ 2024-04-10 13:57 ` Wu, Jiaxin
From: Wu, Jiaxin @ 2024-04-10 13:57 UTC
To: devel; +Cc: Ray Ni, Zeng Star, Gerd Hoffmann, Rahul Kumar
This patch removes the legacy SmBase relocation logic from the
PiSmmCpuDxeSmm driver. SmBase relocation is now performed by the
SmmRelocationInit interface, which will:
1. Relocate the SMBASE for each processor.
2. Create the gSmmBaseHobGuid HOB.
The PiSmmCpuDxeSmm driver is then simplified to:
1. Consume the gSmmBaseHobGuid HOB for the SMBASE (see the sketch below).
2. Call ExecuteFirstSmiInit for early SMM initialization.
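
As a rough illustration of step 1 only -- not the driver's actual GetSmBase()
implementation -- consuming the HOB could look like the sketch below. The
SMM_BASE_HOB_DATA field names and the Guid/SmmBaseHob.h header path are
assumptions taken from UefiCpuPkg, and a real consumer may need to walk
multiple HOB instances rather than only the first one:

#include <PiSmm.h>
#include <Library/HobLib.h>
#include <Library/DebugLib.h>
#include <Guid/SmmBaseHob.h>   // assumed header for gSmmBaseHobGuid/SMM_BASE_HOB_DATA

//
// Hypothetical helper: return the relocated SMBASE for one CPU, or 0 if the
// HOB is absent (i.e. SmmRelocationInit did not run).
//
UINTN
GetRelocatedSmBase (
  IN UINTN  CpuIndex
  )
{
  EFI_HOB_GUID_TYPE  *GuidHob;
  SMM_BASE_HOB_DATA  *HobData;

  GuidHob = GetFirstGuidHob (&gSmmBaseHobGuid);
  if (GuidHob == NULL) {
    return 0;
  }

  HobData = GET_GUID_HOB_DATA (GuidHob);
  ASSERT (CpuIndex < HobData->NumberOfProcessors);
  return (UINTN)HobData->SmBase[CpuIndex];
}

The driver itself keeps this logic behind GetSmBase(), which returns
EFI_NOT_FOUND when the HOB is absent -- the case this patch turns into a
hard ASSERT plus CpuDeadLoop().
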
Cc: Ray Ni <ray.ni@intel.com>
Cc: Zeng Star <star.zeng@intel.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Rahul Kumar <rahul1.kumar@intel.com>
Signed-off-by: Jiaxin Wu <jiaxin.wu@intel.com>
---
UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c | 21 +-
UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Semaphore.c | 42 ----
UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.nasm | 96 --------
UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c | 6 +-
UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c | 322 ++-------------------------
UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h | 98 --------
UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf | 4 -
UefiCpuPkg/PiSmmCpuDxeSmm/SmramSaveState.c | 69 ------
UefiCpuPkg/PiSmmCpuDxeSmm/X64/Semaphore.c | 69 ------
UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm | 146 ------------
10 files changed, 30 insertions(+), 843 deletions(-)
delete mode 100644 UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Semaphore.c
delete mode 100644 UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.nasm
delete mode 100644 UefiCpuPkg/PiSmmCpuDxeSmm/X64/Semaphore.c
delete mode 100644 UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c b/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
index b14c289a27..d67fb49890 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
@@ -639,27 +639,14 @@ InitializeCpuProcedure (
//
InitializeCpuBeforeRebase (IsBsp);
}
if (IsBsp) {
- DEBUG ((DEBUG_INFO, "SmmRestoreCpu: mSmmRelocated is %d\n", mSmmRelocated));
-
//
- // Check whether Smm Relocation is done or not.
- // If not, will do the SmmBases Relocation here!!!
+ // Issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) to execute first SMI init.
//
- if (!mSmmRelocated) {
- //
- // Restore SMBASE for BSP and all APs
- //
- SmmRelocateBases ();
- } else {
- //
- // Issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) to execute first SMI init.
- //
- ExecuteFirstSmiInit ();
- }
+ ExecuteFirstSmiInit ();
}
//
// Skip initialization if mAcpiCpuData is not valid
//
@@ -978,13 +965,13 @@ InitSmmS3ResumeState (
SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize));
if (SmmS3ResumeState->SmmS3StackBase == 0) {
SmmS3ResumeState->SmmS3StackSize = 0;
}
- SmmS3ResumeState->SmmS3Cr0 = mSmmCr0;
+ SmmS3ResumeState->SmmS3Cr0 = (UINT32)AsmReadCr0 ();
SmmS3ResumeState->SmmS3Cr3 = Cr3;
- SmmS3ResumeState->SmmS3Cr4 = mSmmCr4;
+ SmmS3ResumeState->SmmS3Cr4 = (UINT32)AsmReadCr4 ();
if (sizeof (UINTN) == sizeof (UINT64)) {
SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64;
}
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Semaphore.c b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Semaphore.c
deleted file mode 100644
index a9fcc89dda..0000000000
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Semaphore.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/** @file
-Semaphore mechanism to indicate to the BSP that an AP has exited SMM
-after SMBASE relocation.
-
-Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
-SPDX-License-Identifier: BSD-2-Clause-Patent
-
-**/
-
-#include "PiSmmCpuDxeSmm.h"
-
-UINTN mSmmRelocationOriginalAddress;
-volatile BOOLEAN *mRebasedFlag;
-
-/**
- Hook return address of SMM Save State so that semaphore code
- can be executed immediately after AP exits SMM to indicate to
- the BSP that an AP has exited SMM after SMBASE relocation.
-
- @param[in] CpuIndex The processor index.
- @param[in] RebasedFlag A pointer to a flag that is set to TRUE
- immediately after AP exits SMM.
-
-**/
-VOID
-SemaphoreHook (
- IN UINTN CpuIndex,
- IN volatile BOOLEAN *RebasedFlag
- )
-{
- SMRAM_SAVE_STATE_MAP *CpuState;
-
- mRebasedFlag = RebasedFlag;
-
- CpuState = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);
- mSmmRelocationOriginalAddress = (UINTN)HookReturnFromSmm (
- CpuIndex,
- CpuState,
- (UINT64)(UINTN)&SmmRelocationSemaphoreComplete,
- (UINT64)(UINTN)&SmmRelocationSemaphoreComplete
- );
-}
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.nasm
deleted file mode 100644
index b5e77a1a5b..0000000000
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.nasm
+++ /dev/null
@@ -1,96 +0,0 @@
-;------------------------------------------------------------------------------ ;
-; Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>
-; SPDX-License-Identifier: BSD-2-Clause-Patent
-;
-; Module Name:
-;
-; SmmInit.nasm
-;
-; Abstract:
-;
-; Functions for relocating SMBASE's for all processors
-;
-;-------------------------------------------------------------------------------
-
-%include "StuffRsbNasm.inc"
-
-extern ASM_PFX(SmmInitHandler)
-extern ASM_PFX(mRebasedFlag)
-extern ASM_PFX(mSmmRelocationOriginalAddress)
-
-global ASM_PFX(gPatchSmmCr3)
-global ASM_PFX(gPatchSmmCr4)
-global ASM_PFX(gPatchSmmCr0)
-global ASM_PFX(gPatchSmmInitStack)
-global ASM_PFX(gcSmiInitGdtr)
-global ASM_PFX(gcSmmInitSize)
-global ASM_PFX(gcSmmInitTemplate)
-
-%define PROTECT_MODE_CS 0x8
-%define PROTECT_MODE_DS 0x20
-
- SECTION .text
-
-ASM_PFX(gcSmiInitGdtr):
- DW 0
- DQ 0
-
-global ASM_PFX(SmmStartup)
-
-BITS 16
-ASM_PFX(SmmStartup):
- mov eax, 0x80000001 ; read capability
- cpuid
- mov ebx, edx ; rdmsr will change edx. keep it in ebx.
- and ebx, BIT20 ; extract NX capability bit
- shr ebx, 9 ; shift bit to IA32_EFER.NXE[BIT11] position
- mov eax, strict dword 0 ; source operand will be patched
-ASM_PFX(gPatchSmmCr3):
- mov cr3, eax
-o32 lgdt [cs:ebp + (ASM_PFX(gcSmiInitGdtr) - ASM_PFX(SmmStartup))]
- mov eax, strict dword 0 ; source operand will be patched
-ASM_PFX(gPatchSmmCr4):
- mov cr4, eax
- mov ecx, 0xc0000080 ; IA32_EFER MSR
- rdmsr
- or eax, ebx ; set NXE bit if NX is available
- wrmsr
- mov eax, strict dword 0 ; source operand will be patched
-ASM_PFX(gPatchSmmCr0):
- mov di, PROTECT_MODE_DS
- mov cr0, eax
- jmp PROTECT_MODE_CS : dword @32bit
-
-BITS 32
-@32bit:
- mov ds, edi
- mov es, edi
- mov fs, edi
- mov gs, edi
- mov ss, edi
- mov esp, strict dword 0 ; source operand will be patched
-ASM_PFX(gPatchSmmInitStack):
- call ASM_PFX(SmmInitHandler)
- StuffRsb32
- rsm
-
-BITS 16
-ASM_PFX(gcSmmInitTemplate):
- mov ebp, ASM_PFX(SmmStartup)
- sub ebp, 0x30000
- jmp ebp
-
-ASM_PFX(gcSmmInitSize): DW $ - ASM_PFX(gcSmmInitTemplate)
-
-BITS 32
-global ASM_PFX(SmmRelocationSemaphoreComplete)
-ASM_PFX(SmmRelocationSemaphoreComplete):
- push eax
- mov eax, [ASM_PFX(mRebasedFlag)]
- mov byte [eax], 1
- pop eax
- jmp [ASM_PFX(mSmmRelocationOriginalAddress)]
-
-global ASM_PFX(PiSmmCpuSmmInitFixupAddress)
-ASM_PFX(PiSmmCpuSmmInitFixupAddress):
- ret
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
index 081f0c1501..4180a25432 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
@@ -1511,22 +1511,20 @@ SmiRendezvous (
UINTN Index;
UINTN Cr2;
ASSERT (CpuIndex < mMaxNumberOfCpus);
- if (mSmmRelocated) {
- ASSERT (mSmmInitialized != NULL);
- }
+ ASSERT (mSmmInitialized != NULL);
//
// Save Cr2 because Page Fault exception in SMM may override its value,
// when using on-demand paging for above 4G memory.
//
Cr2 = 0;
SaveCr2 (&Cr2);
- if (mSmmRelocated && !mSmmInitialized[CpuIndex]) {
+ if (!mSmmInitialized[CpuIndex]) {
//
// Perform SmmInitHandler for CpuIndex
//
SmmInitHandler ();
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
index 499f979d34..bdf524e33d 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
@@ -54,15 +54,10 @@ CPU_HOT_PLUG_DATA mCpuHotPlugData = {
//
// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM
//
SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData;
-//
-// SMM Relocation variables
-//
-volatile BOOLEAN *mRebased;
-
///
/// Handle for the SMM CPU Protocol
///
EFI_HANDLE mSmmCpuHandle = NULL;
@@ -83,11 +78,10 @@ EDKII_SMM_MEMORY_ATTRIBUTE_PROTOCOL mSmmMemoryAttribute = {
EdkiiSmmClearMemoryAttributes
};
EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER];
-BOOLEAN mSmmRelocated = FALSE;
volatile BOOLEAN *mSmmInitialized = NULL;
UINT32 mBspApicId = 0;
//
// SMM stack information
@@ -133,16 +127,10 @@ SPIN_LOCK *mConfigSmmCodeAccessCheckLock = NULL;
EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;
UINTN mSmmCpuSmramRangeCount;
UINT8 mPhysicalAddressBits;
-//
-// Control register contents saved for SMM S3 resume state initialization.
-//
-UINT32 mSmmCr0;
-UINT32 mSmmCr4;
-
/**
Initialize IDT to setup exception handlers for SMM.
**/
VOID
@@ -348,14 +336,10 @@ SmmInitHandler (
{
UINT32 ApicId;
UINTN Index;
BOOLEAN IsBsp;
- //
- // Update SMM IDT entries' code segment and load IDT
- //
- AsmWriteIdtr (&gcSmiIdtr);
ApicId = GetApicId ();
IsBsp = (BOOLEAN)(mBspApicId == ApicId);
ASSERT (mNumberOfCpus <= mMaxNumberOfCpus);
@@ -386,17 +370,10 @@ SmmInitHandler (
// Initialize private data during S3 resume
//
InitializeMpSyncData ();
}
- if (!mSmmRelocated) {
- //
- // Hook return after RSM to set SMM re-based flag
- //
- SemaphoreHook (Index, &mRebased[Index]);
- }
-
PERF_CODE (
MpPerfEnd (Index, SMM_MP_PERF_PROCEDURE_ID (SmmInitHandler));
);
return;
@@ -454,111 +431,10 @@ ExecuteFirstSmiInit (
}
PERF_FUNCTION_END ();
}
-/**
- Relocate SmmBases for each processor.
-
- Execute on first boot and all S3 resumes
-
-**/
-VOID
-EFIAPI
-SmmRelocateBases (
- VOID
- )
-{
- UINT8 BakBuf[BACK_BUF_SIZE];
- SMRAM_SAVE_STATE_MAP BakBuf2;
- SMRAM_SAVE_STATE_MAP *CpuStatePtr;
- UINT8 *U8Ptr;
- UINTN Index;
- UINTN BspIndex;
-
- PERF_FUNCTION_BEGIN ();
-
- //
- // Make sure the reserved size is large enough for procedure SmmInitTemplate.
- //
- ASSERT (sizeof (BakBuf) >= gcSmmInitSize);
-
- //
- // Patch ASM code template with current CR0, CR3, and CR4 values
- //
- mSmmCr0 = (UINT32)AsmReadCr0 ();
- PatchInstructionX86 (gPatchSmmCr0, mSmmCr0, 4);
- PatchInstructionX86 (gPatchSmmCr3, AsmReadCr3 (), 4);
- mSmmCr4 = (UINT32)AsmReadCr4 ();
- PatchInstructionX86 (gPatchSmmCr4, mSmmCr4 & (~CR4_CET_ENABLE), 4);
-
- //
- // Patch GDTR for SMM base relocation
- //
- gcSmiInitGdtr.Base = gcSmiGdtr.Base;
- gcSmiInitGdtr.Limit = gcSmiGdtr.Limit;
-
- U8Ptr = (UINT8 *)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET);
- CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);
-
- //
- // Backup original contents at address 0x38000
- //
- CopyMem (BakBuf, U8Ptr, sizeof (BakBuf));
- CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2));
-
- //
- // Load image for relocation
- //
- CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize);
-
- //
- // Retrieve the local APIC ID of current processor
- //
- mBspApicId = GetApicId ();
-
- //
- // Relocate SM bases for all APs
- // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate
- //
- BspIndex = (UINTN)-1;
- for (Index = 0; Index < mNumberOfCpus; Index++) {
- mRebased[Index] = FALSE;
- if (mBspApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) {
- SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId);
- //
- // Wait for this AP to finish its 1st SMI
- //
- while (!mRebased[Index]) {
- }
- } else {
- //
- // BSP will be Relocated later
- //
- BspIndex = Index;
- }
- }
-
- //
- // Relocate BSP's SMM base
- //
- ASSERT (BspIndex != (UINTN)-1);
- SendSmiIpi (mBspApicId);
- //
- // Wait for the BSP to finish its 1st SMI
- //
- while (!mRebased[BspIndex]) {
- }
-
- //
- // Restore contents at address 0x38000
- //
- CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2));
- CopyMem (U8Ptr, BakBuf, sizeof (BakBuf));
- PERF_FUNCTION_END ();
-}
-
/**
SMM Ready To Lock event notification handler.
The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to
perform additional lock actions that must be performed from SMM on the next SMI.
@@ -881,12 +757,10 @@ PiCpuSmmEntry (
IN EFI_SYSTEM_TABLE *SystemTable
)
{
EFI_STATUS Status;
UINTN Index;
- VOID *Buffer;
- UINTN BufferPages;
UINTN TileCodeSize;
UINTN TileDataSize;
UINTN TileSize;
UINT8 *Stacks;
VOID *Registration;
@@ -901,11 +775,10 @@ PiCpuSmmEntry (
PERF_FUNCTION_BEGIN ();
//
// Initialize address fixup
//
- PiSmmCpuSmmInitFixupAddress ();
PiSmmCpuSmiEntryFixupAddress ();
//
// Initialize Debug Agent to support source level debug in SMM code
//
@@ -1113,65 +986,34 @@ PiCpuSmmEntry (
// context must be reduced.
//
ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET));
//
- // Retrive the allocated SmmBase from gSmmBaseHobGuid. If found,
+ // Retrieve the allocated SmmBase from gSmmBaseHobGuid. If found,
// means the SmBase relocation has been done.
//
mCpuHotPlugData.SmBase = NULL;
Status = GetSmBase (mMaxNumberOfCpus, &mCpuHotPlugData.SmBase);
- if (Status == EFI_OUT_OF_RESOURCES) {
- ASSERT (Status != EFI_OUT_OF_RESOURCES);
+ ASSERT (!EFI_ERROR (Status));
+ if (EFI_ERROR (Status)) {
CpuDeadLoop ();
}
- if (!EFI_ERROR (Status)) {
- ASSERT (mCpuHotPlugData.SmBase != NULL);
- //
- // Check whether the Required TileSize is enough.
- //
- if (TileSize > SIZE_8KB) {
- DEBUG ((DEBUG_ERROR, "The Range of Smbase in SMRAM is not enough -- Required TileSize = 0x%08x, Actual TileSize = 0x%08x\n", TileSize, SIZE_8KB));
- FreePool (mCpuHotPlugData.SmBase);
- FreePool (gSmmCpuPrivate->ProcessorInfo);
- CpuDeadLoop ();
- return RETURN_BUFFER_TOO_SMALL;
- }
-
- mSmmRelocated = TRUE;
- } else {
- ASSERT (Status == EFI_NOT_FOUND);
- ASSERT (mCpuHotPlugData.SmBase == NULL);
- //
- // When the HOB doesn't exist, allocate new SMBASE itself.
- //
- DEBUG ((DEBUG_INFO, "PiCpuSmmEntry: gSmmBaseHobGuid not found!\n"));
-
- mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus);
- if (mCpuHotPlugData.SmBase == NULL) {
- ASSERT (mCpuHotPlugData.SmBase != NULL);
- CpuDeadLoop ();
- }
-
- //
- // very old processors (i486 + pentium) need 32k not 4k alignment, exclude them.
- //
- ASSERT (FamilyId >= 6);
- //
- // Allocate buffer for all of the tiles.
- //
- BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
- Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
- if (Buffer == NULL) {
- DEBUG ((DEBUG_ERROR, "Failed to allocate %Lu pages.\n", (UINT64)BufferPages));
- CpuDeadLoop ();
- return EFI_OUT_OF_RESOURCES;
- }
+ //
+ // ASSERT SmBase has been relocated.
+ //
+ ASSERT (mCpuHotPlugData.SmBase != NULL);
- ASSERT (Buffer != NULL);
- DEBUG ((DEBUG_INFO, "New Allcoated SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE (BufferPages)));
+ //
+ // Check whether the Required TileSize is enough.
+ //
+ if (TileSize > SIZE_8KB) {
+ DEBUG ((DEBUG_ERROR, "The Range of Smbase in SMRAM is not enough -- Required TileSize = 0x%08x, Actual TileSize = 0x%08x\n", TileSize, SIZE_8KB));
+ FreePool (mCpuHotPlugData.SmBase);
+ FreePool (gSmmCpuPrivate->ProcessorInfo);
+ CpuDeadLoop ();
+ return RETURN_BUFFER_TOO_SMALL;
}
//
// Allocate buffer for pointers to array in SMM_CPU_PRIVATE_DATA.
//
@@ -1198,14 +1040,10 @@ PiCpuSmmEntry (
// Retrieve APIC ID of each enabled processor from the MP Services protocol.
// Also compute the SMBASE address, CPU Save State address, and CPU Save state
// size for each CPU in the platform
//
for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
- if (!mSmmRelocated) {
- mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET;
- }
-
gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof (SMRAM_SAVE_STATE_MAP);
gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET);
gSmmCpuPrivate->Operation[Index] = SmmCpuNone;
if (Index < mNumberOfCpus) {
@@ -1297,45 +1135,15 @@ PiCpuSmmEntry (
DEBUG ((DEBUG_INFO, "PcdCpuSmmStackGuard - 0x%x\n", FeaturePcdGet (PcdCpuSmmStackGuard)));
if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
DEBUG ((DEBUG_INFO, "mSmmShadowStackSize - 0x%x\n", mSmmShadowStackSize));
}
- //
- // Set SMI stack for SMM base relocation
- //
- PatchInstructionX86 (
- gPatchSmmInitStack,
- (UINTN)(Stacks + mSmmStackSize - sizeof (UINTN)),
- sizeof (UINTN)
- );
-
//
// Initialize IDT
//
InitializeSmmIdt ();
- //
- // Check whether Smm Relocation is done or not.
- // If not, will do the SmmBases Relocation here!!!
- //
- if (!mSmmRelocated) {
- //
- // Relocate SMM Base addresses to the ones allocated from SMRAM
- //
- mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus);
- ASSERT (mRebased != NULL);
- SmmRelocateBases ();
-
- //
- // Call hook for BSP to perform extra actions in normal mode after all
- // SMM base addresses have been relocated on all CPUs
- //
- SmmCpuFeaturesSmmRelocationComplete ();
- }
-
- DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));
-
//
// SMM Time initialization
//
InitializeSmmTimer ();
@@ -1368,19 +1176,19 @@ PiCpuSmmEntry (
//
// For relocated SMBASE, some MSRs & CSRs are still required to be configured in SMM Mode for SMM Initialization.
// Those MSRs & CSRs must be configured before normal SMI sources happen.
// So, here is to issue SMI IPI (All Excluding Self SMM IPI + BSP SMM IPI) to execute first SMI init.
//
- if (mSmmRelocated) {
- ExecuteFirstSmiInit ();
+ ExecuteFirstSmiInit ();
- //
- // Call hook for BSP to perform extra actions in normal mode after all
- // SMM base addresses have been relocated on all CPUs
- //
- SmmCpuFeaturesSmmRelocationComplete ();
- }
+ //
+ // Call hook for BSP to perform extra actions in normal mode after all
+ // SMM base addresses have been relocated on all CPUs
+ //
+ SmmCpuFeaturesSmmRelocationComplete ();
+
+ DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));
//
// Fill in SMM Reserved Regions
//
gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0;
@@ -1767,92 +1575,10 @@ AllocateCodePages (
}
return (VOID *)(UINTN)Memory;
}
-/**
- Allocate aligned pages for code.
-
- @param[in] Pages Number of pages to be allocated.
- @param[in] Alignment The requested alignment of the allocation.
- Must be a power of two.
- If Alignment is zero, then byte alignment is used.
-
- @return Allocated memory.
-**/
-VOID *
-AllocateAlignedCodePages (
- IN UINTN Pages,
- IN UINTN Alignment
- )
-{
- EFI_STATUS Status;
- EFI_PHYSICAL_ADDRESS Memory;
- UINTN AlignedMemory;
- UINTN AlignmentMask;
- UINTN UnalignedPages;
- UINTN RealPages;
-
- //
- // Alignment must be a power of two or zero.
- //
- ASSERT ((Alignment & (Alignment - 1)) == 0);
-
- if (Pages == 0) {
- return NULL;
- }
-
- if (Alignment > EFI_PAGE_SIZE) {
- //
- // Calculate the total number of pages since alignment is larger than page size.
- //
- AlignmentMask = Alignment - 1;
- RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);
- //
- // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
- //
- ASSERT (RealPages > Pages);
-
- Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
- if (EFI_ERROR (Status)) {
- return NULL;
- }
-
- AlignedMemory = ((UINTN)Memory + AlignmentMask) & ~AlignmentMask;
- UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN)Memory);
- if (UnalignedPages > 0) {
- //
- // Free first unaligned page(s).
- //
- Status = gSmst->SmmFreePages (Memory, UnalignedPages);
- ASSERT_EFI_ERROR (Status);
- }
-
- Memory = AlignedMemory + EFI_PAGES_TO_SIZE (Pages);
- UnalignedPages = RealPages - Pages - UnalignedPages;
- if (UnalignedPages > 0) {
- //
- // Free last unaligned page(s).
- //
- Status = gSmst->SmmFreePages (Memory, UnalignedPages);
- ASSERT_EFI_ERROR (Status);
- }
- } else {
- //
- // Do not over-allocate pages in this case.
- //
- Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
- if (EFI_ERROR (Status)) {
- return NULL;
- }
-
- AlignedMemory = (UINTN)Memory;
- }
-
- return (VOID *)AlignedMemory;
-}
-
/**
Perform the remaining tasks.
**/
VOID
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
index 7f244ea803..097a8c1b25 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
@@ -186,18 +186,10 @@ typedef struct {
// Code select value
//
#define PROTECT_MODE_CODE_SEGMENT 0x08
#define LONG_MODE_CODE_SEGMENT 0x38
-//
-// The size 0x20 must be bigger than
-// the size of template code of SmmInit. Currently,
-// the size of SmmInit requires the 0x16 Bytes buffer
-// at least.
-//
-#define BACK_BUF_SIZE 0x20
-
#define EXCEPTION_VECTOR_NUMBER 0x20
#define INVALID_APIC_ID 0xFFFFFFFFFFFFFFFFULL
//
@@ -373,22 +365,13 @@ SmmInitHandler (
VOID
ExecuteFirstSmiInit (
VOID
);
-extern BOOLEAN mSmmRelocated;
extern volatile BOOLEAN *mSmmInitialized;
extern UINT32 mBspApicId;
-extern CONST UINT8 gcSmmInitTemplate[];
-extern CONST UINT16 gcSmmInitSize;
-X86_ASSEMBLY_PATCH_LABEL gPatchSmmCr0;
-extern UINT32 mSmmCr0;
-X86_ASSEMBLY_PATCH_LABEL gPatchSmmCr3;
-extern UINT32 mSmmCr4;
-X86_ASSEMBLY_PATCH_LABEL gPatchSmmCr4;
-X86_ASSEMBLY_PATCH_LABEL gPatchSmmInitStack;
X86_ASSEMBLY_PATCH_LABEL mPatchCetSupported;
extern BOOLEAN mCetSupported;
/**
Semaphore operation for all processor relocate SMMBase.
@@ -473,11 +456,10 @@ extern UINT64 gPhyMask;
extern SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData;
extern UINTN mSmmStackArrayBase;
extern UINTN mSmmStackArrayEnd;
extern UINTN mSmmStackSize;
extern EFI_SMM_CPU_SERVICE_PROTOCOL mSmmCpuService;
-extern IA32_DESCRIPTOR gcSmiInitGdtr;
extern SMM_CPU_SEMAPHORES mSmmCpuSemaphores;
extern UINTN mSemaphoreSize;
extern SPIN_LOCK *mPFLock;
extern SPIN_LOCK *mConfigSmmCodeAccessCheckLock;
extern EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;
@@ -791,22 +773,10 @@ VOID
FindSmramInfo (
OUT UINT32 *SmrrBase,
OUT UINT32 *SmrrSize
);
-/**
- Relocate SmmBases for each processor.
-
- Execute on first boot and all S3 resumes
-
-**/
-VOID
-EFIAPI
-SmmRelocateBases (
- VOID
- );
-
/**
Page Fault handler for SMM use.
@param InterruptType Defines the type of interrupt or exception that
occurred on the processor.This parameter is processor architecture specific.
@@ -847,62 +817,19 @@ PerformPreTasks (
VOID
InitMsrSpinLockByIndex (
IN UINT32 MsrIndex
);
-/**
- Hook return address of SMM Save State so that semaphore code
- can be executed immediately after AP exits SMM to indicate to
- the BSP that an AP has exited SMM after SMBASE relocation.
-
- @param[in] CpuIndex The processor index.
- @param[in] RebasedFlag A pointer to a flag that is set to TRUE
- immediately after AP exits SMM.
-
-**/
-VOID
-SemaphoreHook (
- IN UINTN CpuIndex,
- IN volatile BOOLEAN *RebasedFlag
- );
-
/**
Configure SMM Code Access Check feature for all processors.
SMM Feature Control MSR will be locked after configuration.
**/
VOID
ConfigSmmCodeAccessCheck (
VOID
);
-/**
- Hook the code executed immediately after an RSM instruction on the currently
- executing CPU. The mode of code executed immediately after RSM must be
- detected, and the appropriate hook must be selected. Always clear the auto
- HALT restart flag if it is set.
-
- @param[in] CpuIndex The processor index for the currently
- executing CPU.
- @param[in] CpuState Pointer to SMRAM Save State Map for the
- currently executing CPU.
- @param[in] NewInstructionPointer32 Instruction pointer to use if resuming to
- 32-bit mode from 64-bit SMM.
- @param[in] NewInstructionPointer Instruction pointer to use if resuming to
- same mode as SMM.
-
- @retval The value of the original instruction pointer before it was hooked.
-
-**/
-UINT64
-EFIAPI
-HookReturnFromSmm (
- IN UINTN CpuIndex,
- SMRAM_SAVE_STATE_MAP *CpuState,
- UINT64 NewInstructionPointer32,
- UINT64 NewInstructionPointer
- );
-
/**
Get the size of the SMI Handler in bytes.
@retval The size, in bytes, of the SMI Handler.
@@ -1103,26 +1030,10 @@ AllocatePageTableMemory (
VOID *
AllocateCodePages (
IN UINTN Pages
);
-/**
- Allocate aligned pages for code.
-
- @param[in] Pages Number of pages to be allocated.
- @param[in] Alignment The requested alignment of the allocation.
- Must be a power of two.
- If Alignment is zero, then byte alignment is used.
-
- @return Allocated memory.
-**/
-VOID *
-AllocateAlignedCodePages (
- IN UINTN Pages,
- IN UINTN Alignment
- );
-
//
// S3 related global variable and function prototype.
//
extern BOOLEAN mSmmS3Flag;
@@ -1300,19 +1211,10 @@ EdkiiSmmGetMemoryAttributes (
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length,
IN UINT64 *Attributes
);
-/**
- This function fixes up the address of the global variable or function
- referred in SmmInit assembly files to be the absolute address.
-**/
-VOID
-EFIAPI
-PiSmmCpuSmmInitFixupAddress (
- );
-
/**
This function fixes up the address of the global variable or function
referred in SmiEntry assembly files to be the absolute address.
**/
VOID
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
index a018954ed7..1a230ad2d0 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
@@ -45,28 +45,24 @@
SmmMp.c
SmmMpPerf.h
SmmMpPerf.c
[Sources.Ia32]
- Ia32/Semaphore.c
Ia32/PageTbl.c
Ia32/SmmFuncsArch.c
Ia32/SmmProfileArch.c
Ia32/SmmProfileArch.h
- Ia32/SmmInit.nasm
Ia32/SmiEntry.nasm
Ia32/SmiException.nasm
Ia32/MpFuncs.nasm
Ia32/Cet.nasm
[Sources.X64]
- X64/Semaphore.c
X64/PageTbl.c
X64/SmmFuncsArch.c
X64/SmmProfileArch.c
X64/SmmProfileArch.h
- X64/SmmInit.nasm
X64/SmiEntry.nasm
X64/SmiException.nasm
X64/MpFuncs.nasm
X64/Cet.nasm
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmramSaveState.c b/UefiCpuPkg/PiSmmCpuDxeSmm/SmramSaveState.c
index 1e316ee0ac..b9a62aeeb0 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmramSaveState.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmramSaveState.c
@@ -67,79 +67,10 @@ IA32_DESCRIPTOR gSmiHandlerIdtr;
///
/// The mode of the CPU at the time an SMI occurs
///
UINT8 mSmmSaveStateRegisterLma;
-/**
- Hook the code executed immediately after an RSM instruction on the currently
- executing CPU. The mode of code executed immediately after RSM must be
- detected, and the appropriate hook must be selected. Always clear the auto
- HALT restart flag if it is set.
-
- @param[in] CpuIndex The processor index for the currently
- executing CPU.
- @param[in] CpuState Pointer to SMRAM Save State Map for the
- currently executing CPU.
- @param[in] NewInstructionPointer32 Instruction pointer to use if resuming to
- 32-bit mode from 64-bit SMM.
- @param[in] NewInstructionPointer Instruction pointer to use if resuming to
- same mode as SMM.
-
- @retval The value of the original instruction pointer before it was hooked.
-
-**/
-UINT64
-EFIAPI
-HookReturnFromSmm (
- IN UINTN CpuIndex,
- SMRAM_SAVE_STATE_MAP *CpuState,
- UINT64 NewInstructionPointer32,
- UINT64 NewInstructionPointer
- )
-{
- UINT64 OriginalInstructionPointer;
-
- OriginalInstructionPointer = SmmCpuFeaturesHookReturnFromSmm (
- CpuIndex,
- CpuState,
- NewInstructionPointer32,
- NewInstructionPointer
- );
- if (OriginalInstructionPointer != 0) {
- return OriginalInstructionPointer;
- }
-
- if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
- OriginalInstructionPointer = (UINT64)CpuState->x86._EIP;
- CpuState->x86._EIP = (UINT32)NewInstructionPointer;
- //
- // Clear the auto HALT restart flag so the RSM instruction returns
- // program control to the instruction following the HLT instruction.
- //
- if ((CpuState->x86.AutoHALTRestart & BIT0) != 0) {
- CpuState->x86.AutoHALTRestart &= ~BIT0;
- }
- } else {
- OriginalInstructionPointer = CpuState->x64._RIP;
- if ((CpuState->x64.IA32_EFER & LMA) == 0) {
- CpuState->x64._RIP = (UINT32)NewInstructionPointer32;
- } else {
- CpuState->x64._RIP = (UINT32)NewInstructionPointer;
- }
-
- //
- // Clear the auto HALT restart flag so the RSM instruction returns
- // program control to the instruction following the HLT instruction.
- //
- if ((CpuState->x64.AutoHALTRestart & BIT0) != 0) {
- CpuState->x64.AutoHALTRestart &= ~BIT0;
- }
- }
-
- return OriginalInstructionPointer;
-}
-
/**
Get the size of the SMI Handler in bytes.
@retval The size, in bytes, of the SMI Handler.
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Semaphore.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Semaphore.c
deleted file mode 100644
index dafbc3390e..0000000000
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Semaphore.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/** @file
-Semaphore mechanism to indicate to the BSP that an AP has exited SMM
-after SMBASE relocation.
-
-Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
-SPDX-License-Identifier: BSD-2-Clause-Patent
-
-**/
-
-#include "PiSmmCpuDxeSmm.h"
-
-X86_ASSEMBLY_PATCH_LABEL gPatchSmmRelocationOriginalAddressPtr32;
-X86_ASSEMBLY_PATCH_LABEL gPatchRebasedFlagAddr32;
-
-UINTN mSmmRelocationOriginalAddress;
-volatile BOOLEAN *mRebasedFlag;
-
-/**
-AP Semaphore operation in 32-bit mode while BSP runs in 64-bit mode.
-**/
-VOID
-SmmRelocationSemaphoreComplete32 (
- VOID
- );
-
-/**
- Hook return address of SMM Save State so that semaphore code
- can be executed immediately after AP exits SMM to indicate to
- the BSP that an AP has exited SMM after SMBASE relocation.
-
- @param[in] CpuIndex The processor index.
- @param[in] RebasedFlag A pointer to a flag that is set to TRUE
- immediately after AP exits SMM.
-
-**/
-VOID
-SemaphoreHook (
- IN UINTN CpuIndex,
- IN volatile BOOLEAN *RebasedFlag
- )
-{
- SMRAM_SAVE_STATE_MAP *CpuState;
- UINTN TempValue;
-
- mRebasedFlag = RebasedFlag;
- PatchInstructionX86 (
- gPatchRebasedFlagAddr32,
- (UINT32)(UINTN)mRebasedFlag,
- 4
- );
-
- CpuState = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);
- mSmmRelocationOriginalAddress = HookReturnFromSmm (
- CpuIndex,
- CpuState,
- (UINT64)(UINTN)&SmmRelocationSemaphoreComplete32,
- (UINT64)(UINTN)&SmmRelocationSemaphoreComplete
- );
-
- //
- // Use temp value to fix ICC compiler warning
- //
- TempValue = (UINTN)&mSmmRelocationOriginalAddress;
- PatchInstructionX86 (
- gPatchSmmRelocationOriginalAddressPtr32,
- (UINT32)TempValue,
- 4
- );
-}
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm
deleted file mode 100644
index 9cf3a6dcf9..0000000000
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm
+++ /dev/null
@@ -1,146 +0,0 @@
-;------------------------------------------------------------------------------ ;
-; Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>
-; SPDX-License-Identifier: BSD-2-Clause-Patent
-;
-; Module Name:
-;
-; SmmInit.nasm
-;
-; Abstract:
-;
-; Functions for relocating SMBASE's for all processors
-;
-;-------------------------------------------------------------------------------
-
-%include "StuffRsbNasm.inc"
-
-extern ASM_PFX(SmmInitHandler)
-extern ASM_PFX(mRebasedFlag)
-extern ASM_PFX(mSmmRelocationOriginalAddress)
-
-global ASM_PFX(gPatchSmmCr3)
-global ASM_PFX(gPatchSmmCr4)
-global ASM_PFX(gPatchSmmCr0)
-global ASM_PFX(gPatchSmmInitStack)
-global ASM_PFX(gcSmiInitGdtr)
-global ASM_PFX(gcSmmInitSize)
-global ASM_PFX(gcSmmInitTemplate)
-global ASM_PFX(gPatchRebasedFlagAddr32)
-global ASM_PFX(gPatchSmmRelocationOriginalAddressPtr32)
-
-%define LONG_MODE_CS 0x38
-
- DEFAULT REL
- SECTION .text
-
-ASM_PFX(gcSmiInitGdtr):
- DW 0
- DQ 0
-
-global ASM_PFX(SmmStartup)
-
-BITS 16
-ASM_PFX(SmmStartup):
- mov eax, 0x80000001 ; read capability
- cpuid
- mov ebx, edx ; rdmsr will change edx. keep it in ebx.
- mov eax, strict dword 0 ; source operand will be patched
-ASM_PFX(gPatchSmmCr3):
- mov cr3, eax
-o32 lgdt [cs:ebp + (ASM_PFX(gcSmiInitGdtr) - ASM_PFX(SmmStartup))]
- mov eax, strict dword 0 ; source operand will be patched
-ASM_PFX(gPatchSmmCr4):
- or ah, 2 ; enable XMM registers access
- mov cr4, eax
- mov ecx, 0xc0000080 ; IA32_EFER MSR
- rdmsr
- or ah, BIT0 ; set LME bit
- test ebx, BIT20 ; check NXE capability
- jz .1
- or ah, BIT3 ; set NXE bit
-.1:
- wrmsr
- mov eax, strict dword 0 ; source operand will be patched
-ASM_PFX(gPatchSmmCr0):
- mov cr0, eax ; enable protected mode & paging
- jmp LONG_MODE_CS : dword 0 ; offset will be patched to @LongMode
-@PatchLongModeOffset:
-
-BITS 64
-@LongMode: ; long-mode starts here
- mov rsp, strict qword 0 ; source operand will be patched
-ASM_PFX(gPatchSmmInitStack):
- and sp, 0xfff0 ; make sure RSP is 16-byte aligned
- ;
- ; According to X64 calling convention, XMM0~5 are volatile, we need to save
- ; them before calling C-function.
- ;
- sub rsp, 0x60
- movdqa [rsp], xmm0
- movdqa [rsp + 0x10], xmm1
- movdqa [rsp + 0x20], xmm2
- movdqa [rsp + 0x30], xmm3
- movdqa [rsp + 0x40], xmm4
- movdqa [rsp + 0x50], xmm5
-
- add rsp, -0x20
- call ASM_PFX(SmmInitHandler)
- add rsp, 0x20
-
- ;
- ; Restore XMM0~5 after calling C-function.
- ;
- movdqa xmm0, [rsp]
- movdqa xmm1, [rsp + 0x10]
- movdqa xmm2, [rsp + 0x20]
- movdqa xmm3, [rsp + 0x30]
- movdqa xmm4, [rsp + 0x40]
- movdqa xmm5, [rsp + 0x50]
-
- StuffRsb64
- rsm
-
-BITS 16
-ASM_PFX(gcSmmInitTemplate):
- mov ebp, [cs:@L1 - ASM_PFX(gcSmmInitTemplate) + 0x8000]
- sub ebp, 0x30000
- jmp ebp
-@L1:
- DQ 0; ASM_PFX(SmmStartup)
-
-ASM_PFX(gcSmmInitSize): DW $ - ASM_PFX(gcSmmInitTemplate)
-
-BITS 64
-global ASM_PFX(SmmRelocationSemaphoreComplete)
-ASM_PFX(SmmRelocationSemaphoreComplete):
- push rax
- mov rax, [ASM_PFX(mRebasedFlag)]
- mov byte [rax], 1
- pop rax
- jmp [ASM_PFX(mSmmRelocationOriginalAddress)]
-
-;
-; Semaphore code running in 32-bit mode
-;
-BITS 32
-global ASM_PFX(SmmRelocationSemaphoreComplete32)
-ASM_PFX(SmmRelocationSemaphoreComplete32):
- push eax
- mov eax, strict dword 0 ; source operand will be patched
-ASM_PFX(gPatchRebasedFlagAddr32):
- mov byte [eax], 1
- pop eax
- jmp dword [dword 0] ; destination will be patched
-ASM_PFX(gPatchSmmRelocationOriginalAddressPtr32):
-
-BITS 64
-global ASM_PFX(PiSmmCpuSmmInitFixupAddress)
-ASM_PFX(PiSmmCpuSmmInitFixupAddress):
- lea rax, [@LongMode]
- lea rcx, [@PatchLongModeOffset - 6]
- mov dword [rcx], eax
-
- lea rax, [ASM_PFX(SmmStartup)]
- lea rcx, [@L1]
- mov qword [rcx], rax
- ret
--
2.16.2.windows.1
* Re: [edk2-devel] [PATCH v1 13/13] UefiCpuPkg/PiSmmCpuDxeSmm: Remove SmBases relocation logic
@ 2024-04-11 10:35 Ni, Ray
From: Ni, Ray @ 2024-04-11 10:35 UTC
To: Wu, Jiaxin, devel@edk2.groups.io
Cc: Zeng, Star, Gerd Hoffmann, Kumar, Rahul R
@@ -348,14 +336,10 @@ SmmInitHandler (
[Ray.1] Can you rename this function? Originally it was a handler called from SmmInit.nasm to initialize the SMM environment, but today it purely initializes the SMM environment from C.
How about "InitializeSmm"? Also, "EFIAPI" is not needed since it is no longer called from assembly.
{
UINT32 ApicId;
UINTN Index;
BOOLEAN IsBsp;
- //
- // Update SMM IDT entries' code segment and load IDT
- //
- AsmWriteIdtr (&gcSmiIdtr);
[Ray.2]
OK.
The IDTR update was needed when this function was called from SmmInit.nasm, because IDTR is not loaded there.
It is not needed when the function is entered via SmiEntry.nasm, because IDTR is already loaded on that path.
Other changes look good to me.
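
To make [Ray.1] concrete, the declaration could become something like the
following (the "InitializeSmm" name is only a suggestion; the body of today's
SmmInitHandler would stay as-is, minus the AsmWriteIdtr call that [Ray.2]
notes is already covered by SmiEntry.nasm):

/**
  Initialize the SMM environment on the executing CPU.
  Formerly SmmInitHandler; EFIAPI is dropped because the function is now
  only called from C code (ExecuteFirstSmiInit / SmiRendezvous), not from
  the removed SmmInit.nasm.
**/
VOID
InitializeSmm (
  VOID
  );
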
@@ -1103,26 +1030,10 @@ AllocatePageTableMemory (
VOID *
AllocateCodePages (
IN UINTN Pages
);
-/**
- Allocate aligned pages for code.
-
- @param[in] Pages Number of pages to be allocated.
- @param[in] Alignment The requested alignment of the allocation.
- Must be a power of two.
- If Alignment is zero, then byte alignment is used.
-
- @return Allocated memory.
-**/
-VOID *
-AllocateAlignedCodePages (
- IN UINTN Pages,
- IN UINTN Alignment
- );
-
//
// S3 related global variable and function prototype.
//
extern BOOLEAN mSmmS3Flag;
@@ -1300,19 +1211,10 @@ EdkiiSmmGetMemoryAttributes (
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length,
IN UINT64 *Attributes
);
-/**
- This function fixes up the address of the global variable or function
- referred in SmmInit assembly files to be the absolute address.
-**/
-VOID
-EFIAPI
-PiSmmCpuSmmInitFixupAddress (
- );
-
/**
This function fixes up the address of the global variable or function
referred in SmiEntry assembly files to be the absolute address.
**/
VOID
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
index a018954ed7..1a230ad2d0 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
@@ -45,28 +45,24 @@
SmmMp.c
SmmMpPerf.h
SmmMpPerf.c
[Sources.Ia32]
- Ia32/Semaphore.c
Ia32/PageTbl.c
Ia32/SmmFuncsArch.c
Ia32/SmmProfileArch.c
Ia32/SmmProfileArch.h
- Ia32/SmmInit.nasm
Ia32/SmiEntry.nasm
Ia32/SmiException.nasm
Ia32/MpFuncs.nasm
Ia32/Cet.nasm
[Sources.X64]
- X64/Semaphore.c
X64/PageTbl.c
X64/SmmFuncsArch.c
X64/SmmProfileArch.c
X64/SmmProfileArch.h
- X64/SmmInit.nasm
X64/SmiEntry.nasm
X64/SmiException.nasm
X64/MpFuncs.nasm
X64/Cet.nasm
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmramSaveState.c b/UefiCpuPkg/PiSmmCpuDxeSmm/SmramSaveState.c
index 1e316ee0ac..b9a62aeeb0 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmramSaveState.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmramSaveState.c
@@ -67,79 +67,10 @@ IA32_DESCRIPTOR gSmiHandlerIdtr;
///
/// The mode of the CPU at the time an SMI occurs
///
UINT8 mSmmSaveStateRegisterLma;
-/**
- Hook the code executed immediately after an RSM instruction on the currently
- executing CPU. The mode of code executed immediately after RSM must be
- detected, and the appropriate hook must be selected. Always clear the auto
- HALT restart flag if it is set.
-
- @param[in] CpuIndex The processor index for the currently
- executing CPU.
- @param[in] CpuState Pointer to SMRAM Save State Map for the
- currently executing CPU.
- @param[in] NewInstructionPointer32 Instruction pointer to use if resuming to
- 32-bit mode from 64-bit SMM.
- @param[in] NewInstructionPointer Instruction pointer to use if resuming to
- same mode as SMM.
-
- @retval The value of the original instruction pointer before it was hooked.
-
-**/
-UINT64
-EFIAPI
-HookReturnFromSmm (
- IN UINTN CpuIndex,
- SMRAM_SAVE_STATE_MAP *CpuState,
- UINT64 NewInstructionPointer32,
- UINT64 NewInstructionPointer
- )
-{
- UINT64 OriginalInstructionPointer;
-
- OriginalInstructionPointer = SmmCpuFeaturesHookReturnFromSmm (
- CpuIndex,
- CpuState,
- NewInstructionPointer32,
- NewInstructionPointer
- );
- if (OriginalInstructionPointer != 0) {
- return OriginalInstructionPointer;
- }
-
- if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
- OriginalInstructionPointer = (UINT64)CpuState->x86._EIP;
- CpuState->x86._EIP = (UINT32)NewInstructionPointer;
- //
- // Clear the auto HALT restart flag so the RSM instruction returns
- // program control to the instruction following the HLT instruction.
- //
- if ((CpuState->x86.AutoHALTRestart & BIT0) != 0) {
- CpuState->x86.AutoHALTRestart &= ~BIT0;
- }
- } else {
- OriginalInstructionPointer = CpuState->x64._RIP;
- if ((CpuState->x64.IA32_EFER & LMA) == 0) {
- CpuState->x64._RIP = (UINT32)NewInstructionPointer32;
- } else {
- CpuState->x64._RIP = (UINT32)NewInstructionPointer;
- }
-
- //
- // Clear the auto HALT restart flag so the RSM instruction returns
- // program control to the instruction following the HLT instruction.
- //
- if ((CpuState->x64.AutoHALTRestart & BIT0) != 0) {
- CpuState->x64.AutoHALTRestart &= ~BIT0;
- }
- }
-
- return OriginalInstructionPointer;
-}
-
/**
Get the size of the SMI Handler in bytes.
@retval The size, in bytes, of the SMI Handler.
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Semaphore.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Semaphore.c
deleted file mode 100644
index dafbc3390e..0000000000
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/Semaphore.c
+++ /dev/null
@@ -1,69 +0,0 @@
-/** @file
-Semaphore mechanism to indicate to the BSP that an AP has exited SMM
-after SMBASE relocation.
-
-Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
-SPDX-License-Identifier: BSD-2-Clause-Patent
-
-**/
-
-#include "PiSmmCpuDxeSmm.h"
-
-X86_ASSEMBLY_PATCH_LABEL gPatchSmmRelocationOriginalAddressPtr32;
-X86_ASSEMBLY_PATCH_LABEL gPatchRebasedFlagAddr32;
-
-UINTN mSmmRelocationOriginalAddress;
-volatile BOOLEAN *mRebasedFlag;
-
-/**
-AP Semaphore operation in 32-bit mode while BSP runs in 64-bit mode.
-**/
-VOID
-SmmRelocationSemaphoreComplete32 (
- VOID
- );
-
-/**
- Hook return address of SMM Save State so that semaphore code
- can be executed immediately after AP exits SMM to indicate to
- the BSP that an AP has exited SMM after SMBASE relocation.
-
- @param[in] CpuIndex The processor index.
- @param[in] RebasedFlag A pointer to a flag that is set to TRUE
- immediately after AP exits SMM.
-
-**/
-VOID
-SemaphoreHook (
- IN UINTN CpuIndex,
- IN volatile BOOLEAN *RebasedFlag
- )
-{
- SMRAM_SAVE_STATE_MAP *CpuState;
- UINTN TempValue;
-
- mRebasedFlag = RebasedFlag;
- PatchInstructionX86 (
- gPatchRebasedFlagAddr32,
- (UINT32)(UINTN)mRebasedFlag,
- 4
- );
-
- CpuState = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);
- mSmmRelocationOriginalAddress = HookReturnFromSmm (
- CpuIndex,
- CpuState,
- (UINT64)(UINTN)&SmmRelocationSemaphoreComplete32,
- (UINT64)(UINTN)&SmmRelocationSemaphoreComplete
- );
-
- //
- // Use temp value to fix ICC compiler warning
- //
- TempValue = (UINTN)&mSmmRelocationOriginalAddress;
- PatchInstructionX86 (
- gPatchSmmRelocationOriginalAddressPtr32,
- (UINT32)TempValue,
- 4
- );
-}
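The Semaphore.c file deleted above existed so the BSP could tell when an AP had actually left SMM with its new SMBASE: SemaphoreHook() patched the AP's SMM return address so that, right after RSM, the AP set a shared flag. Roughly, the BSP side of that handshake looked like the sketch below; RebasedFlag and WaitForApRebaseSketch() are hypothetical names used only for illustration, not code from the driver.

volatile BOOLEAN  RebasedFlag;

VOID
WaitForApRebaseSketch (
  IN UINT32  ApicId
  )
{
  //
  // Arm the flag, send the AP into its relocation SMI, and spin until the
  // post-RSM stub (SmmRelocationSemaphoreComplete in the SmmInit.nasm file
  // deleted further below) has set the flag from the AP side.
  //
  RebasedFlag = FALSE;
  SendSmiIpi (ApicId);
  while (!RebasedFlag) {
    CpuPause ();
  }
}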
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm
deleted file mode 100644
index 9cf3a6dcf9..0000000000
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm
+++ /dev/null
@@ -1,146 +0,0 @@
-;------------------------------------------------------------------------------ ;
-; Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>
-; SPDX-License-Identifier: BSD-2-Clause-Patent
-;
-; Module Name:
-;
-; SmmInit.nasm
-;
-; Abstract:
-;
-; Functions for relocating SMBASE's for all processors
-;
-;-------------------------------------------------------------------------------
-
-%include "StuffRsbNasm.inc"
-
-extern ASM_PFX(SmmInitHandler)
-extern ASM_PFX(mRebasedFlag)
-extern ASM_PFX(mSmmRelocationOriginalAddress)
-
-global ASM_PFX(gPatchSmmCr3)
-global ASM_PFX(gPatchSmmCr4)
-global ASM_PFX(gPatchSmmCr0)
-global ASM_PFX(gPatchSmmInitStack)
-global ASM_PFX(gcSmiInitGdtr)
-global ASM_PFX(gcSmmInitSize)
-global ASM_PFX(gcSmmInitTemplate)
-global ASM_PFX(gPatchRebasedFlagAddr32)
-global ASM_PFX(gPatchSmmRelocationOriginalAddressPtr32)
-
-%define LONG_MODE_CS 0x38
-
- DEFAULT REL
- SECTION .text
-
-ASM_PFX(gcSmiInitGdtr):
- DW 0
- DQ 0
-
-global ASM_PFX(SmmStartup)
-
-BITS 16
-ASM_PFX(SmmStartup):
- mov eax, 0x80000001 ; read capability
- cpuid
- mov ebx, edx ; rdmsr will change edx. keep it in ebx.
- mov eax, strict dword 0 ; source operand will be patched
-ASM_PFX(gPatchSmmCr3):
- mov cr3, eax
-o32 lgdt [cs:ebp + (ASM_PFX(gcSmiInitGdtr) - ASM_PFX(SmmStartup))]
- mov eax, strict dword 0 ; source operand will be patched
-ASM_PFX(gPatchSmmCr4):
- or ah, 2 ; enable XMM registers access
- mov cr4, eax
- mov ecx, 0xc0000080 ; IA32_EFER MSR
- rdmsr
- or ah, BIT0 ; set LME bit
- test ebx, BIT20 ; check NXE capability
- jz .1
- or ah, BIT3 ; set NXE bit
-.1:
- wrmsr
- mov eax, strict dword 0 ; source operand will be patched
-ASM_PFX(gPatchSmmCr0):
- mov cr0, eax ; enable protected mode & paging
- jmp LONG_MODE_CS : dword 0 ; offset will be patched to @LongMode
-@PatchLongModeOffset:
-
-BITS 64
-@LongMode: ; long-mode starts here
- mov rsp, strict qword 0 ; source operand will be patched
-ASM_PFX(gPatchSmmInitStack):
- and sp, 0xfff0 ; make sure RSP is 16-byte aligned
- ;
- ; According to X64 calling convention, XMM0~5 are volatile, we need to save
- ; them before calling C-function.
- ;
- sub rsp, 0x60
- movdqa [rsp], xmm0
- movdqa [rsp + 0x10], xmm1
- movdqa [rsp + 0x20], xmm2
- movdqa [rsp + 0x30], xmm3
- movdqa [rsp + 0x40], xmm4
- movdqa [rsp + 0x50], xmm5
-
- add rsp, -0x20
- call ASM_PFX(SmmInitHandler)
- add rsp, 0x20
-
- ;
- ; Restore XMM0~5 after calling C-function.
- ;
- movdqa xmm0, [rsp]
- movdqa xmm1, [rsp + 0x10]
- movdqa xmm2, [rsp + 0x20]
- movdqa xmm3, [rsp + 0x30]
- movdqa xmm4, [rsp + 0x40]
- movdqa xmm5, [rsp + 0x50]
-
- StuffRsb64
- rsm
-
-BITS 16
-ASM_PFX(gcSmmInitTemplate):
- mov ebp, [cs:@L1 - ASM_PFX(gcSmmInitTemplate) + 0x8000]
- sub ebp, 0x30000
- jmp ebp
-@L1:
- DQ 0; ASM_PFX(SmmStartup)
-
-ASM_PFX(gcSmmInitSize): DW $ - ASM_PFX(gcSmmInitTemplate)
-
-BITS 64
-global ASM_PFX(SmmRelocationSemaphoreComplete)
-ASM_PFX(SmmRelocationSemaphoreComplete):
- push rax
- mov rax, [ASM_PFX(mRebasedFlag)]
- mov byte [rax], 1
- pop rax
- jmp [ASM_PFX(mSmmRelocationOriginalAddress)]
-
-;
-; Semaphore code running in 32-bit mode
-;
-BITS 32
-global ASM_PFX(SmmRelocationSemaphoreComplete32)
-ASM_PFX(SmmRelocationSemaphoreComplete32):
- push eax
- mov eax, strict dword 0 ; source operand will be patched
-ASM_PFX(gPatchRebasedFlagAddr32):
- mov byte [eax], 1
- pop eax
- jmp dword [dword 0] ; destination will be patched
-ASM_PFX(gPatchSmmRelocationOriginalAddressPtr32):
-
-BITS 64
-global ASM_PFX(PiSmmCpuSmmInitFixupAddress)
-ASM_PFX(PiSmmCpuSmmInitFixupAddress):
- lea rax, [@LongMode]
- lea rcx, [@PatchLongModeOffset - 6]
- mov dword [rcx], eax
-
- lea rax, [ASM_PFX(SmmStartup)]
- lea rcx, [@L1]
- mov qword [rcx], rax
- ret
--
2.16.2.windows.1