From: "Wu, Jiaxin" <jiaxin.wu@intel.com>
To: devel@edk2.groups.io
Cc: Eric Dong <eric.dong@intel.com>, Ray Ni <ray.ni@intel.com>
Subject: [PATCH v1] UefiCpuPkg: Check SMM Delayed/Blocked AP Count to decide all CPUs in SMI or not
Date: Tue, 29 Nov 2022 16:18:52 +0800
Message-ID: <20221129081852.12888-1-jiaxin.wu@intel.com>
REF: https://bugzilla.tianocore.org/show_bug.cgi?id=4173
The Blocked register might return a counter instead of a bitvector. This patch
updates the code to handle that case by checking the SMM Delayed/Blocked AP
count to decide whether all CPUs are in SMI or not.
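With a count-based register value, the existing per-CPU bitvector check in
AllCpusInSmmWithExceptions() no longer applies. Roughly, the arrival check in
MpService.c (see the diff below for the actual code) becomes:

    //
    // BlockedCount and DisabledCount are accumulated from the SMM
    // Blocked/Enable registers, read once per package.
    //
    if (*mSmmMpSyncData->Counter + BlockedCount + DisabledCount >= mNumberOfCpus) {
      return TRUE;
    }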
Cc: Eric Dong <eric.dong@intel.com>
Cc: Ray Ni <ray.ni@intel.com>
Signed-off-by: Jiaxin Wu <jiaxin.wu@intel.com>
---
UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c | 193 +++++++++++++++++++++++++----
UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c | 5 +
UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h | 16 ++-
3 files changed, 182 insertions(+), 32 deletions(-)
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
index c79da418e3..5996570903 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
@@ -22,10 +22,15 @@ UINTN mSemaphoreSize;
SPIN_LOCK *mPFLock = NULL;
SMM_CPU_SYNC_MODE mCpuSmmSyncMode;
BOOLEAN mMachineCheckSupported = FALSE;
MM_COMPLETION mSmmStartupThisApToken;
+//
+// Processor specified by mPackageBspInfo[PackageIndex] will do the package-scope register check.
+//
+UINT32 *mPackageBspInfo = NULL;
+
extern UINTN mSmmShadowStackSize;
/**
Performs an atomic compare exchange operation to get semaphore.
The compare exchange operation must be performed using
@@ -155,54 +160,128 @@ ReleaseAllAPs (
}
}
}
/**
- Checks if all CPUs (with certain exceptions) have checked in for this SMI run
+ Check whether the processor specified by CpuIndex should perform the package-level
+ register programming during System Management Mode initialization.
- @param Exceptions CPU Arrival exception flags.
+ The processor specified by mPackageBspInfo[PackageIndex] will do the package-scope
+ register programming.
+
+ @param[in] CpuIndex Index of the processor to check.
- @retval TRUE if all CPUs the have checked in.
- @retval FALSE if at least one Normal AP hasn't checked in.
+ @retval TRUE  The processor should perform the package-level register programming.
+ @retval FALSE The processor should not perform the package-level register programming.
**/
BOOLEAN
-AllCpusInSmmWithExceptions (
- SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions
+IsPackageBsp (
+ IN UINTN CpuIndex
)
{
- UINTN Index;
- SMM_CPU_DATA_BLOCK *CpuData;
- EFI_PROCESSOR_INFORMATION *ProcessorInfo;
+ UINT32 PackageIndex;
- ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
+ PackageIndex = gSmmCpuPrivate->ProcessorInfo[CpuIndex].Location.Package;
- if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
- return TRUE;
+ ASSERT (mPackageBspInfo != NULL);
+
+ //
+ // Set the value of mPackageBspInfo[PackageIndex].
+ // The package-scope registers are checked by the first processor (CpuIndex) in the package.
+ //
+ // If mPackageBspInfo[PackageIndex] equals (UINT32)-1, update it to the current
+ // CpuIndex; otherwise, leave it unchanged.
+ //
+ if (mPackageBspInfo[PackageIndex] == (UINT32)-1) {
+ mPackageBspInfo[PackageIndex] = (UINT32)CpuIndex;
}
- CpuData = mSmmMpSyncData->CpuData;
- ProcessorInfo = gSmmCpuPrivate->ProcessorInfo;
- for (Index = 0; Index < mMaxNumberOfCpus; Index++) {
- if (!(*(CpuData[Index].Present)) && (ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID)) {
- if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && (SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0)) {
- continue;
- }
+ return (BOOLEAN) (mPackageBspInfo[PackageIndex] == CpuIndex);
+}
+
+/**
+ Returns the number of SMM Delayed, Blocked and Disabled threads.
+
+ @param[in,out] DelayedCount  The number of SMM Delayed threads; updated only if not NULL.
+ @param[in,out] BlockedCount  The number of SMM Blocked threads; updated only if not NULL.
+ @param[in,out] DisabledCount The number of SMM Disabled threads; updated only if not NULL.
+
+**/
+VOID
+GetSmmDelayedBlockedDisabledCount (
+ IN OUT UINT32 *DelayedCount,
+ IN OUT UINT32 *BlockedCount,
+ IN OUT UINT32 *DisabledCount
+ )
+{
+ UINTN Index;
+
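+ //
+ // Accumulate the counts from the SMM Delayed/Blocked/Enable registers. Only the
+ // first processor recorded for each package (IsPackageBsp) reads the registers,
+ // so each package is counted once.
+ //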
+ for (Index = 0; Index < mNumberOfCpus; Index++) {
+ if (IsPackageBsp (Index)) {
- if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && (SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0)) {
- continue;
+ if (DelayedCount != NULL) {
+ *DelayedCount += (UINT32) SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed);
}
- if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && (SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0)) {
- continue;
+ if (BlockedCount != NULL) {
+ *BlockedCount += (UINT32) SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked);
}
- return FALSE;
+ if (DisabledCount != NULL) {
+ *DisabledCount += (UINT32) SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable);
+ }
}
}
+}
- return TRUE;
+/**
+ Checks if all CPUs (except Blocked and Disabled APs) have checked in for this SMI run.
+
+ @retval TRUE  If all CPUs have checked in.
+ @retval FALSE If at least one normal AP hasn't checked in.
+
+**/
+BOOLEAN
+AllCpusInSmmExceptBlockedDisabled (
+ VOID
+ )
+{
+ UINT32 BlockedCount;
+ UINT32 DisabledCount;
+
+ BlockedCount = 0;
+ DisabledCount = 0;
+
+ //
+ // Check to make sure mSmmMpSyncData->Counter is valid and not locked.
+ //
+ ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
+
+ //
+ // Check whether all CPUs are already in SMM.
+ //
+ if (*mSmmMpSyncData->Counter == mNumberOfCpus) {
+ return TRUE;
+ }
+
+ //
+ // Count the Blocked and Disabled APs, which are not expected to enter SMM.
+ //
+ GetSmmDelayedBlockedDisabledCount (NULL, &BlockedCount, &DisabledCount);
+
+ //
+ // *mSmmMpSyncData->Counter might be updated concurrently by all APs, so its value
+ // can change dynamically. If some APs enter the SMI after BlockedCount and
+ // DisabledCount are read, *mSmmMpSyncData->Counter will increase, which can make
+ // *mSmmMpSyncData->Counter + BlockedCount + DisabledCount exceed mNumberOfCpus.
+ // Since BlockedCount and DisabledCount are local snapshots, this is acceptable here,
+ // where the sum is only used to decide whether all CPUs are in SMM.
+ //
+ if (*mSmmMpSyncData->Counter + BlockedCount + DisabledCount >= mNumberOfCpus) {
+ return TRUE;
+ }
+
+ return FALSE;
}
/**
Has OS enabled Lmce in the MSR_IA32_MCG_EXT_CTL
@@ -266,10 +345,15 @@ SmmWaitForApArrival (
{
UINT64 Timer;
UINTN Index;
BOOLEAN LmceEn;
BOOLEAN LmceSignal;
+ UINT32 DelayedCount;
+ UINT32 BlockedCount;
+
+ DelayedCount = 0;
+ BlockedCount = 0;
ASSERT (*mSmmMpSyncData->Counter <= mNumberOfCpus);
LmceEn = FALSE;
LmceSignal = FALSE;
@@ -294,11 +378,11 @@ SmmWaitForApArrival (
//
for (Timer = StartSyncTimer ();
!IsSyncTimerTimeout (Timer) && !(LmceEn && LmceSignal);
)
{
- mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
+ mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmExceptBlockedDisabled ();
if (mSmmMpSyncData->AllApArrivedWithException) {
break;
}
CpuPause ();
@@ -335,19 +419,27 @@ SmmWaitForApArrival (
//
for (Timer = StartSyncTimer ();
!IsSyncTimerTimeout (Timer);
)
{
- mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED);
+ mSmmMpSyncData->AllApArrivedWithException = AllCpusInSmmExceptBlockedDisabled ();
if (mSmmMpSyncData->AllApArrivedWithException) {
break;
}
CpuPause ();
}
}
+ if (!mSmmMpSyncData->AllApArrivedWithException) {
+ //
+ // Report the number of Delayed and Blocked APs for debugging.
+ //
+ GetSmmDelayedBlockedDisabledCount (&DelayedCount, &BlockedCount, NULL);
+ DEBUG ((DEBUG_INFO, "SmmWaitForApArrival: Delayed AP Count = %d, Blocked AP Count = %d\n", DelayedCount, BlockedCount));
+ }
+
return;
}
/**
Replace OS MTRR's with SMI MTRR's.
@@ -737,10 +829,11 @@ APHandler (
// BSP timeout in the first round
//
if (mSmmMpSyncData->BspIndex != -1) {
//
// BSP Index is known
+ // This AP is already in SMI, but the BSP is not, so try to bring the BSP into SMM.
//
BspIndex = mSmmMpSyncData->BspIndex;
ASSERT (CpuIndex != BspIndex);
//
@@ -761,16 +854,19 @@ APHandler (
if (!(*mSmmMpSyncData->InsideSmm)) {
//
// Give up since BSP is unable to enter SMM
// and signal the completion of this AP
+ // by decreasing mSmmMpSyncData->Counter.
+ //
WaitForSemaphore (mSmmMpSyncData->Counter);
return;
}
} else {
//
// Don't know BSP index. Give up without sending IPI to BSP.
+ // Decrease mSmmMpSyncData->Counter to signal the completion of this AP.
//
WaitForSemaphore (mSmmMpSyncData->Counter);
return;
}
}
@@ -1666,14 +1762,17 @@ SmiRendezvous (
//
goto Exit;
} else {
//
// Signal presence of this processor
+ // mSmmMpSyncData->Counter is increased here.
+ // "ReleaseSemaphore (mSmmMpSyncData->Counter) == 0" means the BSP has already ended the synchronization.
//
if (ReleaseSemaphore (mSmmMpSyncData->Counter) == 0) {
//
// BSP has already ended the synchronization, so QUIT!!!
+ // This AP is too late to enter SMI since the BSP has already ended the synchronization.
//
//
// Wait for BSP's signal to finish SMI
//
@@ -1781,10 +1880,50 @@ Exit:
// Restore Cr2
//
RestoreCr2 (Cr2);
}
+/**
+ Initialize the PackageBsp info. The processor specified by mPackageBspInfo[PackageIndex]
+ will do the package-scope register programming. Each entry defaults to (UINT32)-1,
+ which means the CpuIndex has not been specified yet.
+
+**/
+VOID
+InitializePackageBspInfo (
+ VOID
+ )
+{
+ UINT32 Index;
+ UINT32 PackageId;
+ UINT32 PackageCount;
+
+ PackageId = 0;
+ PackageCount = 0;
+
+ //
+ // Count the number of packages: the maximum PackageId + 1.
+ //
+ for (Index = 0; Index < mNumberOfCpus; Index++) {
+ if (PackageId < gSmmCpuPrivate->ProcessorInfo[Index].Location.Package) {
+ PackageId = gSmmCpuPrivate->ProcessorInfo[Index].Location.Package;
+ }
+ }
+ PackageCount = PackageId + 1;
+
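+ //
+ // mPackageBspInfo is indexed by PackageId, so allocate (max PackageId + 1) entries.
+ //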
+ mPackageBspInfo = (UINT32 *)AllocatePool (sizeof (UINT32) * PackageCount);
+ ASSERT (mPackageBspInfo != NULL);
+ if (mPackageBspInfo == NULL) {
+ return;
+ }
+
+ //
+ // Set default CpuIndex to (UINT32)-1, which means not specified yet.
+ //
+ SetMem32 (mPackageBspInfo, sizeof (UINT32) * PackageCount, (UINT32)-1);
+}
+
/**
Allocate buffer for SpinLock and Wrapper function buffer.
**/
VOID
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
index 40aabeda72..9383124e4d 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
@@ -1043,10 +1043,15 @@ PiCpuSmmEntry (
//
// Initialize global buffer for MM MP.
//
InitializeDataForMmMp ();
+ //
+ // Initialize PackageBsp Info.
+ //
+ InitializePackageBspInfo ();
+
//
// Install the SMM Mp Protocol into SMM protocol database
//
Status = gSmst->SmmInstallProtocolInterface (
&mSmmCpuHandle,
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
index ef8bf5947d..896aa669fe 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
@@ -192,15 +192,10 @@ typedef struct {
#define EXCEPTION_VECTOR_NUMBER 0x20
#define INVALID_APIC_ID 0xFFFFFFFFFFFFFFFFULL
-typedef UINT32 SMM_CPU_ARRIVAL_EXCEPTIONS;
-#define ARRIVAL_EXCEPTION_BLOCKED 0x1
-#define ARRIVAL_EXCEPTION_DELAYED 0x2
-#define ARRIVAL_EXCEPTION_SMI_DISABLED 0x4
-
//
// Wrapper used to convert EFI_AP_PROCEDURE2 and EFI_AP_PROCEDURE.
//
typedef struct {
EFI_AP_PROCEDURE Procedure;
@@ -1460,10 +1455,21 @@ EFI_STATUS
RegisterStartupProcedure (
IN EFI_AP_PROCEDURE Procedure,
IN OUT VOID *ProcedureArguments OPTIONAL
);
+/**
+ Initialize the PackageBsp info. The processor specified by mPackageBspInfo[PackageIndex]
+ will do the package-scope register programming. Each entry defaults to (UINT32)-1,
+ which means the CpuIndex has not been specified yet.
+
+**/
+VOID
+InitializePackageBspInfo (
+ VOID
+ );
+
/**
Allocate buffer for SpinLock and Wrapper function buffer.
**/
VOID
--
2.16.2.windows.1