From: "Ni, Ruiyu" <ruiyu.ni@Intel.com>
To: Eric Dong <eric.dong@intel.com>, edk2-devel@lists.01.org
Cc: Laszlo Ersek <lersek@redhat.com>
Subject: Re: [Patch v2 4/6] UefiCpuPkg/PiSmmCpuDxeSmm: Add logic to support semaphore type.
Date: Thu, 18 Oct 2018 13:54:24 +0800 [thread overview]
Message-ID: <aee09a30-8299-c417-ffb2-d88c340e985a@Intel.com> (raw)
In-Reply-To: <20181017021635.14972-5-eric.dong@intel.com>
On 10/17/2018 10:16 AM, Eric Dong wrote:
> Because this driver needs to set MSRs saved in the normal boot phase, sync the semaphore
> logic from the RegisterCpuFeaturesLib code, which is used for the normal boot phase.
>
> For details, see the change below for RegisterCpuFeaturesLib:
> UefiCpuPkg/RegisterCpuFeaturesLib: Add logic to support semaphore type.
>
> Cc: Ruiyu Ni <ruiyu.ni@intel.com>
> Cc: Laszlo Ersek <lersek@redhat.com>
> Contributed-under: TianoCore Contribution Agreement 1.1
> Signed-off-by: Eric Dong <eric.dong@intel.com>
> ---
> UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c | 406 +++++++++++++++++++----------
> UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c | 3 -
> UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h | 3 +-
> 3 files changed, 268 insertions(+), 144 deletions(-)
>
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c b/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
> index 52ff9679d5..42a889f08f 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c
> @@ -38,9 +38,12 @@ typedef struct {
> } MP_ASSEMBLY_ADDRESS_MAP;
>
> //
> -// Spin lock used to serialize MemoryMapped operation
> +// Flags used when program the register.
> //
> -SPIN_LOCK *mMemoryMappedLock = NULL;
> +typedef struct {
> + volatile UINTN MemoryMappedLock; // Spinlock used to program mmio
> + volatile UINT32 *SemaphoreCount; // Semaphore used to program semaphore.
> +} PROGRAM_CPU_REGISTER_FLAGS;
>
> //
> // Signal that SMM BASE relocation is complete.
> @@ -62,13 +65,11 @@ AsmGetAddressMap (
> #define LEGACY_REGION_SIZE (2 * 0x1000)
> #define LEGACY_REGION_BASE (0xA0000 - LEGACY_REGION_SIZE)
>
> +PROGRAM_CPU_REGISTER_FLAGS mCpuFlags;
> ACPI_CPU_DATA mAcpiCpuData;
> volatile UINT32 mNumberToFinish;
> MP_CPU_EXCHANGE_INFO *mExchangeInfo;
> BOOLEAN mRestoreSmmConfigurationInS3 = FALSE;
> -MP_MSR_LOCK *mMsrSpinLocks = NULL;
> -UINTN mMsrSpinLockCount;
> -UINTN mMsrCount = 0;
>
> //
> // S3 boot flag
> @@ -91,89 +92,6 @@ UINT8 mApHltLoopCodeTemplate[] = {
> 0xEB, 0xFC // jmp $-2
> };
>
> -/**
> - Get MSR spin lock by MSR index.
> -
> - @param MsrIndex MSR index value.
> -
> - @return Pointer to MSR spin lock.
> -
> -**/
> -SPIN_LOCK *
> -GetMsrSpinLockByIndex (
> - IN UINT32 MsrIndex
> - )
> -{
> - UINTN Index;
> - for (Index = 0; Index < mMsrCount; Index++) {
> - if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) {
> - return mMsrSpinLocks[Index].SpinLock;
> - }
> - }
> - return NULL;
> -}
> -
> -/**
> - Initialize MSR spin lock by MSR index.
> -
> - @param MsrIndex MSR index value.
> -
> -**/
> -VOID
> -InitMsrSpinLockByIndex (
> - IN UINT32 MsrIndex
> - )
> -{
> - UINTN MsrSpinLockCount;
> - UINTN NewMsrSpinLockCount;
> - UINTN Index;
> - UINTN AddedSize;
> -
> - if (mMsrSpinLocks == NULL) {
> - MsrSpinLockCount = mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter;
> - mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * MsrSpinLockCount);
> - ASSERT (mMsrSpinLocks != NULL);
> - for (Index = 0; Index < MsrSpinLockCount; Index++) {
> - mMsrSpinLocks[Index].SpinLock =
> - (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr + Index * mSemaphoreSize);
> - mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
> - }
> - mMsrSpinLockCount = MsrSpinLockCount;
> - mSmmCpuSemaphores.SemaphoreMsr.AvailableCounter = 0;
> - }
> - if (GetMsrSpinLockByIndex (MsrIndex) == NULL) {
> - //
> - // Initialize spin lock for MSR programming
> - //
> - mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex;
> - InitializeSpinLock (mMsrSpinLocks[mMsrCount].SpinLock);
> - mMsrCount ++;
> - if (mMsrCount == mMsrSpinLockCount) {
> - //
> - // If MSR spin lock buffer is full, enlarge it
> - //
> - AddedSize = SIZE_4KB;
> - mSmmCpuSemaphores.SemaphoreMsr.Msr =
> - AllocatePages (EFI_SIZE_TO_PAGES(AddedSize));
> - ASSERT (mSmmCpuSemaphores.SemaphoreMsr.Msr != NULL);
> - NewMsrSpinLockCount = mMsrSpinLockCount + AddedSize / mSemaphoreSize;
> - mMsrSpinLocks = ReallocatePool (
> - sizeof (MP_MSR_LOCK) * mMsrSpinLockCount,
> - sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount,
> - mMsrSpinLocks
> - );
> - ASSERT (mMsrSpinLocks != NULL);
> - mMsrSpinLockCount = NewMsrSpinLockCount;
> - for (Index = mMsrCount; Index < mMsrSpinLockCount; Index++) {
> - mMsrSpinLocks[Index].SpinLock =
> - (SPIN_LOCK *)((UINTN)mSmmCpuSemaphores.SemaphoreMsr.Msr +
> - (Index - mMsrCount) * mSemaphoreSize);
> - mMsrSpinLocks[Index].MsrIndex = (UINT32)-1;
> - }
> - }
> - }
> -}
> -
> /**
> Sync up the MTRR values for all processors.
>
> @@ -204,42 +122,131 @@ Returns:
> }
>
> /**
> - Programs registers for the calling processor.
> + Increment semaphore by 1.
> +
> + @param Sem IN: 32-bit unsigned integer
> +
> +**/
> +VOID
> +S3ReleaseSemaphore (
> + IN OUT volatile UINT32 *Sem
> + )
> +{
> + InterlockedIncrement (Sem);
> +}
> +
> +/**
> + Decrement the semaphore by 1 if it is not zero.
>
> - This function programs registers for the calling processor.
> + Performs an atomic decrement operation for semaphore.
> + The compare exchange operation must be performed using
> + MP safe mechanisms.
>
> - @param RegisterTables Pointer to register table of the running processor.
> - @param RegisterTableCount Register table count.
> + @param Sem IN: 32-bit unsigned integer
>
> **/
> VOID
> -SetProcessorRegister (
> - IN CPU_REGISTER_TABLE *RegisterTables,
> - IN UINTN RegisterTableCount
> +S3WaitForSemaphore (
> + IN OUT volatile UINT32 *Sem
> + )
> +{
> + UINT32 Value;
> +
> + do {
> + Value = *Sem;
> + } while (Value == 0 ||
> + InterlockedCompareExchange32 (
> + Sem,
> + Value,
> + Value - 1
> + ) != Value);
> +}
> +
> +/**
> + Get the string value for the input register type.
> +
> + @param[in] RegisterType Input the register type.
> +
> + @retval Return the register type string.
> +**/
> +CHAR16 *
> +GetRegisterTypeString (
> + IN REGISTER_TYPE RegisterType
> + )
> +{
> + switch (RegisterType) {
> + case Msr:
> + return L"Msr";
> +
> + case ControlRegister:
> + return L"ControlRegister";
> +
> + case MemoryMapped:
> + return L"MemoryMapped";
> +
> + case CacheControl:
> + return L"CacheControl";
> +
> + case Semaphore:
> + return L"Semaphore";
> +
> + default:
> + ASSERT (FALSE);
> + return L"NoneType";
> + }
> +}
> +
> +/**
> + Initialize the CPU registers from a register table.
> +
> + @param[in] RegisterTable The register table for this AP.
> + @param[in] ApLocation AP location info for this ap.
> + @param[in] CpuStatus CPU status info for this CPU.
> + @param[in] CpuFlags Flags data structure used when program the register.
> +
> + @note This service could be called by BSP/APs.
> +**/
> +VOID
> +ProgramProcessorRegister (
> + IN CPU_REGISTER_TABLE *RegisterTable,
> + IN EFI_CPU_PHYSICAL_LOCATION *ApLocation,
> + IN CPU_STATUS_INFORMATION *CpuStatus,
> + IN PROGRAM_CPU_REGISTER_FLAGS *CpuFlags
> )
> {
> CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;
> UINTN Index;
> UINTN Value;
> - SPIN_LOCK *MsrSpinLock;
> - UINT32 InitApicId;
> - CPU_REGISTER_TABLE *RegisterTable;
> -
> - InitApicId = GetInitialApicId ();
> - RegisterTable = NULL;
> - for (Index = 0; Index < RegisterTableCount; Index++) {
> - if (RegisterTables[Index].InitialApicId == InitApicId) {
> - RegisterTable = &RegisterTables[Index];
> - break;
> - }
> - }
> - ASSERT (RegisterTable != NULL);
> + CPU_REGISTER_TABLE_ENTRY *RegisterTableEntryHead;
> + volatile UINT32 *SemaphorePtr;
> + UINT32 FirstThread;
> + UINT32 PackageThreadsCount;
> + UINT32 CurrentThread;
> + UINTN ProcessorIndex;
> + UINTN ThreadIndex;
> + UINTN ValidThreadCount;
> + UINT32 *ValidCorePerPackage;
> +
> + ThreadIndex = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount +
> + ApLocation->Core * CpuStatus->MaxThreadCount +
> + ApLocation->Thread;
>
> //
> // Traverse Register Table of this logical processor
> //
> - RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
> - for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) {
> + RegisterTableEntryHead = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry;
> +
> + for (Index = 0; Index < RegisterTable->TableLength; Index++) {
> +
> + RegisterTableEntry = &RegisterTableEntryHead[Index];
> + DEBUG ((
> + DEBUG_INFO,
> + "Processor = %lu, Entry Index %lu, Type = %s!\n",
> + (UINT64)ThreadIndex,
> + (UINT64)Index,
> + GetRegisterTypeString((REGISTER_TYPE)RegisterTableEntry->RegisterType)
> + ));
> +
> //
> // Check the type of specified register
> //
> @@ -310,12 +317,6 @@ SetProcessorRegister (
> RegisterTableEntry->Value
> );
> } else {
> - //
> - // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode
> - // to make sure MSR read/write operation is atomic.
> - //
> - MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index);
> - AcquireSpinLock (MsrSpinLock);
> //
> // Set the bit section according to bit start and length
> //
> @@ -325,21 +326,20 @@ SetProcessorRegister (
> RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
> RegisterTableEntry->Value
> );
> - ReleaseSpinLock (MsrSpinLock);
> }
> break;
> //
> // MemoryMapped operations
> //
> case MemoryMapped:
> - AcquireSpinLock (mMemoryMappedLock);
> + AcquireSpinLock (&CpuFlags->MemoryMappedLock);
> MmioBitFieldWrite32 (
> (UINTN)(RegisterTableEntry->Index | LShiftU64 (RegisterTableEntry->HighIndex, 32)),
> RegisterTableEntry->ValidBitStart,
> RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1,
> (UINT32)RegisterTableEntry->Value
> );
> - ReleaseSpinLock (mMemoryMappedLock);
> + ReleaseSpinLock (&CpuFlags->MemoryMappedLock);
> break;
> //
> // Enable or disable cache
> @@ -355,12 +355,126 @@ SetProcessorRegister (
> }
> break;
>
> + case Semaphore:
> + // Semaphore works logic like below:
> + //
> + // V(x) = LibReleaseSemaphore (Semaphore[FirstThread + x]);
> + // P(x) = LibWaitForSemaphore (Semaphore[FirstThread + x]);
> + //
> + // All threads (T0...Tn) waits in P() line and continues running
> + // together.
> + //
> + //
> + // T0 T1 ... Tn
> + //
> + // V(0...n) V(0...n) ... V(0...n)
> + // n * P(0) n * P(1) ... n * P(n)
> + //
> + SemaphorePtr = CpuFlags->SemaphoreCount;
> + switch (RegisterTableEntry->Value) {
> + case CoreDepType:
> + //
> + // Get Offset info for the first thread in the core which current thread belongs to.
> + //
> + FirstThread = (ApLocation->Package * CpuStatus->MaxCoreCount + ApLocation->Core) * CpuStatus->MaxThreadCount;
> + CurrentThread = FirstThread + ApLocation->Thread;
> + //
> + // First Notify all threads in current Core that this thread has ready.
> + //
> + for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {
> + S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
> + }
> + //
> + // Second, check whether all valid threads in current core have ready.
> + //
> + for (ProcessorIndex = 0; ProcessorIndex < CpuStatus->MaxThreadCount; ProcessorIndex ++) {
> + S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
> + }
> + break;
> +
> + case PackageDepType:
> + ValidCorePerPackage = (UINT32 *)(UINTN)CpuStatus->ValidCoresPerPackages;
> + //
> + // Get Offset info for the first thread in the package which current thread belongs to.
> + //
> + FirstThread = ApLocation->Package * CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount;
> + //
> + // Get the possible threads count for current package.
> + //
> + PackageThreadsCount = CpuStatus->MaxThreadCount * CpuStatus->MaxCoreCount;
> + CurrentThread = FirstThread + CpuStatus->MaxThreadCount * ApLocation->Core + ApLocation->Thread;
> + //
> + // Get the valid thread count for current package.
> + //
> + ValidThreadCount = CpuStatus->MaxThreadCount * ValidCorePerPackage[ApLocation->Package];
> + //
> + // First Notify all threads in current package that this thread has ready.
> + //
> + for (ProcessorIndex = 0; ProcessorIndex < PackageThreadsCount ; ProcessorIndex ++) {
> + S3ReleaseSemaphore (&SemaphorePtr[FirstThread + ProcessorIndex]);
> + }
> + //
> + // Second, check whether all valid threads in current package have ready.
> + //
> + for (ProcessorIndex = 0; ProcessorIndex < ValidThreadCount; ProcessorIndex ++) {
> + S3WaitForSemaphore (&SemaphorePtr[CurrentThread]);
> + }
> + break;
> +
> + default:
> + break;
> + }
> + break;
> +
> default:
> break;
> }
> }
> }
>
> +/**
> +
> + Set Processor register for one AP.
> +
> + @param PreSmmRegisterTable Use pre Smm register table or register table.
> +
> +**/
> +VOID
> +SetRegister (
> + IN BOOLEAN PreSmmRegisterTable
> + )
> +{
> + CPU_REGISTER_TABLE *RegisterTable;
> + CPU_REGISTER_TABLE *RegisterTables;
> + UINT32 InitApicId;
> + UINTN ProcIndex;
> + UINTN Index;
> +
> + if (PreSmmRegisterTable) {
> + RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable;
> + } else {
> + RegisterTables = (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable;
> + }
> +
> + InitApicId = GetInitialApicId ();
> + RegisterTable = NULL;
> + for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) {
> + if (RegisterTables[Index].InitialApicId == InitApicId) {
> + RegisterTable = &RegisterTables[Index];
> + ProcIndex = Index;
> + break;
> + }
> + }
> + ASSERT (RegisterTable != NULL);
> +
> + ProgramProcessorRegister (
> + RegisterTable,
> + (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)mAcpiCpuData.ApLocation + ProcIndex,
> + &mAcpiCpuData.CpuStatus,
> + &mCpuFlags
> + );
> +}
> +
> /**
> AP initialization before then after SMBASE relocation in the S3 boot path.
> **/
> @@ -374,7 +488,7 @@ InitializeAp (
>
> LoadMtrrData (mAcpiCpuData.MtrrTable);
>
> - SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);
> + SetRegister (TRUE);
>
> //
> // Count down the number with lock mechanism.
> @@ -391,7 +505,7 @@ InitializeAp (
> ProgramVirtualWireMode ();
> DisableLvtInterrupts ();
>
> - SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);
> + SetRegister (FALSE);
>
> //
> // Place AP into the safe code, count down the number with lock mechanism in the safe code.
> @@ -466,7 +580,7 @@ InitializeCpuBeforeRebase (
> {
> LoadMtrrData (mAcpiCpuData.MtrrTable);
>
> - SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable, mAcpiCpuData.NumberOfCpus);
> + SetRegister (TRUE);
>
> ProgramVirtualWireMode ();
>
> @@ -502,15 +616,24 @@ InitializeCpuAfterRebase (
> VOID
> )
> {
> - SetProcessorRegister ((CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable, mAcpiCpuData.NumberOfCpus);
> -
> mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1;
>
> //
> - // Signal that SMM base relocation is complete and to continue initialization.
> + // Signal that SMM base relocation is complete and to continue initialization for all APs.
> //
> mInitApsAfterSmmBaseReloc = TRUE;
>
> + //
> + // Must begin set register after all APs have continue their initialization.
> + // This is a requirement to support semaphore mechanism in register table.
> + // Because if semaphore's dependence type is package type, semaphore will wait
> + // for all Aps in one package finishing their tasks before set next register
> + // for all APs. If the Aps not begin its task during BSP doing its task, the
> + // BSP thread will hang because it is waiting for other Aps in the same
> + // package finishing their task.
> + //
> + SetRegister (FALSE);
> +
> while (mNumberToFinish > 0) {
> CpuPause ();
> }
> @@ -574,8 +697,6 @@ SmmRestoreCpu (
>
> mSmmS3Flag = TRUE;
>
> - InitializeSpinLock (mMemoryMappedLock);
> -
> //
> // See if there is enough context to resume PEI Phase
> //
> @@ -790,7 +911,6 @@ CopyRegisterTable (
> )
> {
> UINTN Index;
> - UINTN Index1;
> CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry;
>
> CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE));
> @@ -802,17 +922,6 @@ CopyRegisterTable (
> );
> ASSERT (RegisterTableEntry != NULL);
> DestinationRegisterTableList[Index].RegisterTableEntry = (EFI_PHYSICAL_ADDRESS)(UINTN)RegisterTableEntry;
> - //
> - // Go though all MSRs in register table to initialize MSR spin lock
> - //
> - for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) {
> - if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) {
> - //
> - // Initialize MSR spin lock only for those MSRs need bit field writing
> - //
> - InitMsrSpinLockByIndex (RegisterTableEntry->Index);
> - }
> - }
> }
> }
> }
> @@ -832,6 +941,7 @@ GetAcpiCpuData (
> VOID *GdtForAp;
> VOID *IdtForAp;
> VOID *MachineCheckHandlerForAp;
> + CPU_STATUS_INFORMATION *CpuStatus;
>
> if (!mAcpiS3Enable) {
> return;
> @@ -906,6 +1016,24 @@ GetAcpiCpuData (
> Gdtr->Base = (UINTN)GdtForAp;
> Idtr->Base = (UINTN)IdtForAp;
> mAcpiCpuData.ApMachineCheckHandlerBase = (EFI_PHYSICAL_ADDRESS)(UINTN)MachineCheckHandlerForAp;
> +
> + CpuStatus = &mAcpiCpuData.CpuStatus;
> + CopyMem (CpuStatus, &AcpiCpuData->CpuStatus, sizeof (CPU_STATUS_INFORMATION));
> + CpuStatus->ValidCoresPerPackages = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
> + sizeof (UINT32) * CpuStatus->PackageCount,
> + (UINT32 *)(UINTN)AcpiCpuData->CpuStatus.ValidCoresPerPackages
> + );
> + ASSERT (CpuStatus->ValidCoresPerPackages != 0);
> + mAcpiCpuData.ApLocation = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocateCopyPool (
> + mAcpiCpuData.NumberOfCpus * sizeof (EFI_CPU_PHYSICAL_LOCATION),
> + (EFI_CPU_PHYSICAL_LOCATION *)(UINTN)AcpiCpuData->ApLocation
> + );
> + ASSERT (mAcpiCpuData.ApLocation != 0);
> + InitializeSpinLock((SPIN_LOCK*) &mCpuFlags.MemoryMappedLock);
> + mCpuFlags.SemaphoreCount = AllocateZeroPool (
> + sizeof (UINT32) * CpuStatus->PackageCount *
> + CpuStatus->MaxCoreCount * CpuStatus->MaxThreadCount);
> + ASSERT (mCpuFlags.SemaphoreCount != NULL);
> }
>
> /**
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
> index 9cf508a5c7..42b040531e 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
> @@ -1303,8 +1303,6 @@ InitializeSmmCpuSemaphores (
> mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock
> = (SPIN_LOCK *)SemaphoreAddr;
> SemaphoreAddr += SemaphoreSize;
> - mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock
> - = (SPIN_LOCK *)SemaphoreAddr;
>
> SemaphoreAddr = (UINTN)SemaphoreBlock + GlobalSemaphoresSize;
> mSmmCpuSemaphores.SemaphoreCpu.Busy = (SPIN_LOCK *)SemaphoreAddr;
> @@ -1321,7 +1319,6 @@ InitializeSmmCpuSemaphores (
>
> mPFLock = mSmmCpuSemaphores.SemaphoreGlobal.PFLock;
> mConfigSmmCodeAccessCheckLock = mSmmCpuSemaphores.SemaphoreGlobal.CodeAccessCheckLock;
> - mMemoryMappedLock = mSmmCpuSemaphores.SemaphoreGlobal.MemoryMappedLock;
>
> mSemaphoreSize = SemaphoreSize;
> }
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> index 8c7f4996d1..e2970308fe 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
> @@ -53,6 +53,7 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
> #include <Library/ReportStatusCodeLib.h>
> #include <Library/SmmCpuFeaturesLib.h>
> #include <Library/PeCoffGetEntryPointLib.h>
> +#include <Library/RegisterCpuFeaturesLib.h>
>
> #include <AcpiCpuData.h>
> #include <CpuHotPlugData.h>
> @@ -364,7 +365,6 @@ typedef struct {
> volatile BOOLEAN *AllCpusInSync;
> SPIN_LOCK *PFLock;
> SPIN_LOCK *CodeAccessCheckLock;
> - SPIN_LOCK *MemoryMappedLock;
> } SMM_CPU_SEMAPHORE_GLOBAL;
>
> ///
> @@ -409,7 +409,6 @@ extern SMM_CPU_SEMAPHORES mSmmCpuSemaphores;
> extern UINTN mSemaphoreSize;
> extern SPIN_LOCK *mPFLock;
> extern SPIN_LOCK *mConfigSmmCodeAccessCheckLock;
> -extern SPIN_LOCK *mMemoryMappedLock;
> extern EFI_SMRAM_DESCRIPTOR *mSmmCpuSmramRanges;
> extern UINTN mSmmCpuSmramRangeCount;
> extern UINT8 mPhysicalAddressBits;
>
As the code change is similar to patch 3/6, please refer to the comments
for 3/6.
--
Thanks,
Ray
next prev parent reply other threads:[~2018-10-18 5:53 UTC|newest]
Thread overview: 18+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-10-17 2:16 [Patch v2 0/6] Fix performance issue caused by Set MSR task Eric Dong
2018-10-17 2:16 ` [Patch v2 1/6] UefiCpuPkg/Include/AcpiCpuData.h: Add Semaphore related Information Eric Dong
2018-10-18 3:04 ` Ni, Ruiyu
2018-10-18 3:10 ` Ni, Ruiyu
2018-10-17 2:16 ` [Patch v2 2/6] UefiCpuPkg/RegisterCpuFeaturesLib.h: Add new dependence types Eric Dong
2018-10-18 3:31 ` Ni, Ruiyu
2018-10-17 2:16 ` [Patch v2 3/6] UefiCpuPkg/RegisterCpuFeaturesLib: Add logic to support semaphore type Eric Dong
2018-10-18 5:46 ` Ni, Ruiyu
2018-10-17 2:16 ` [Patch v2 4/6] UefiCpuPkg/PiSmmCpuDxeSmm: " Eric Dong
2018-10-18 5:54 ` Ni, Ruiyu [this message]
2018-10-17 2:16 ` [Patch v2 5/6] UefiCpuPkg/CpuS3DataDxe: Keep old data if value already existed Eric Dong
2018-10-18 5:57 ` Ni, Ruiyu
2018-10-17 2:16 ` [Patch v2 6/6] UefiCpuPkg/CpuCommonFeaturesLib: Register MSR base on scope Info Eric Dong
2018-10-18 6:01 ` Ni, Ruiyu
2018-10-17 17:33 ` [Patch v2 0/6] Fix performance issue caused by Set MSR task Laszlo Ersek
2018-10-18 7:36 ` Dong, Eric
2018-10-18 2:12 ` Ni, Ruiyu
2018-10-18 2:35 ` Dong, Eric
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-list from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=aee09a30-8299-c417-ffb2-d88c340e985a@Intel.com \
--to=devel@edk2.groups.io \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox