[PATCH V3 3/6] MdeModulePkg/PiSmmCore: Add MemoryAttributes support.
From: Jiewen Yao @ 2016-11-11 9:00 UTC
To: edk2-devel
Cc: Jeff Fan, Feng Tian, Star Zeng, Michael D Kinney, Laszlo Ersek,
Paolo Bonzini
1) This patch installs the LoadedImage protocol into the SMM
protocol database, so that the SMM image information can be
retrieved easily to construct the PiSmmMemoryAttributesTable.
2) The table is produced at the SmmEndOfDxe event, so that the
consumer (PiSmmCpu) may consult it to set memory attributes in
the page table.
Cc: Jeff Fan <jeff.fan@intel.com>
Cc: Feng Tian <feng.tian@intel.com>
Cc: Star Zeng <star.zeng@intel.com>
Cc: Michael D Kinney <michael.d.kinney@intel.com>
Cc: Laszlo Ersek <lersek@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Jiewen Yao <jiewen.yao@intel.com>
---
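
A minimal consumer-side sketch (not part of this patch) of how an SMM
driver, after SmmEndOfDxe, might locate the table in the SMM
configuration table and walk its descriptors. The function name, the
DEBUG output, and the exact includes are illustrative assumptions only,
and the consumer INF would also need gEdkiiPiSmmMemoryAttributesTableGuid
listed under [Guids]; the real consumer is PiSmmCpu, as described above.

  #include <PiSmm.h>
  #include <Library/SmmServicesTableLib.h>
  #include <Library/BaseMemoryLib.h>
  #include <Library/DebugLib.h>
  #include <Guid/PiSmmMemoryAttributesTable.h>

  /**
    Illustrative only: locate and dump the PiSmmMemoryAttributesTable.
    Must be called after SmmEndOfDxe, when the table has been published.
  **/
  VOID
  DumpSmmMemoryAttributesTable (
    VOID
    )
  {
    EDKII_PI_SMM_MEMORY_ATTRIBUTES_TABLE  *Table;
    EFI_MEMORY_DESCRIPTOR                 *Entry;
    UINTN                                 Index;

    //
    // Search the SMM configuration table for the GUID installed by this patch.
    //
    Table = NULL;
    for (Index = 0; Index < gSmst->NumberOfTableEntries; Index++) {
      if (CompareGuid (&gSmst->SmmConfigurationTable[Index].VendorGuid,
                       &gEdkiiPiSmmMemoryAttributesTableGuid)) {
        Table = gSmst->SmmConfigurationTable[Index].VendorTable;
        break;
      }
    }
    if (Table == NULL) {
      return;
    }

    //
    // Walk the descriptors: EfiRuntimeServicesCode entries carry EFI_MEMORY_RO,
    // EfiRuntimeServicesData entries carry EFI_MEMORY_XP.
    //
    Entry = (EFI_MEMORY_DESCRIPTOR *)(Table + 1);
    for (Index = 0; Index < Table->NumberOfEntries; Index++) {
      DEBUG ((DEBUG_INFO, "Type %d Base 0x%016lx Pages 0x%lx Attr 0x%lx\n",
        Entry->Type, Entry->PhysicalStart, Entry->NumberOfPages, Entry->Attribute));
      Entry = NEXT_MEMORY_DESCRIPTOR (Entry, Table->DescriptorSize);
    }
  }

This mirrors how the consumer described above is expected to consult the
table before applying EFI_MEMORY_RO/EFI_MEMORY_XP in the SMM page table.
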
MdeModulePkg/Core/PiSmmCore/Dispatcher.c | 66 +
MdeModulePkg/Core/PiSmmCore/MemoryAttributesTable.c | 1509 ++++++++++++++++++++
MdeModulePkg/Core/PiSmmCore/Page.c | 775 +++++++++-
MdeModulePkg/Core/PiSmmCore/PiSmmCore.c | 40 +
MdeModulePkg/Core/PiSmmCore/PiSmmCore.h | 91 ++
MdeModulePkg/Core/PiSmmCore/PiSmmCore.inf | 2 +
MdeModulePkg/Core/PiSmmCore/Pool.c | 16 +
7 files changed, 2473 insertions(+), 26 deletions(-)
diff --git a/MdeModulePkg/Core/PiSmmCore/Dispatcher.c b/MdeModulePkg/Core/PiSmmCore/Dispatcher.c
index 87f4617..1bddaf1 100644
--- a/MdeModulePkg/Core/PiSmmCore/Dispatcher.c
+++ b/MdeModulePkg/Core/PiSmmCore/Dispatcher.c
@@ -580,6 +580,11 @@ SmmLoadImage (
DriverEntry->LoadedImage->SystemTable = gST;
DriverEntry->LoadedImage->DeviceHandle = DeviceHandle;
+ DriverEntry->SmmLoadedImage.Revision = EFI_LOADED_IMAGE_PROTOCOL_REVISION;
+ DriverEntry->SmmLoadedImage.ParentHandle = gSmmCorePrivate->SmmIplImageHandle;
+ DriverEntry->SmmLoadedImage.SystemTable = gST;
+ DriverEntry->SmmLoadedImage.DeviceHandle = DeviceHandle;
+
//
// Make an EfiBootServicesData buffer copy of FilePath
//
@@ -599,6 +604,25 @@ SmmLoadImage (
DriverEntry->LoadedImage->ImageDataType = EfiRuntimeServicesData;
//
+ // Make a buffer copy of FilePath
+ //
+ Status = SmmAllocatePool (EfiRuntimeServicesData, GetDevicePathSize(FilePath), (VOID **)&DriverEntry->SmmLoadedImage.FilePath);
+ if (EFI_ERROR (Status)) {
+ if (Buffer != NULL) {
+ gBS->FreePool (Buffer);
+ }
+ gBS->FreePool (DriverEntry->LoadedImage->FilePath);
+ SmmFreePages (DstBuffer, PageCount);
+ return Status;
+ }
+ CopyMem (DriverEntry->SmmLoadedImage.FilePath, FilePath, GetDevicePathSize(FilePath));
+
+ DriverEntry->SmmLoadedImage.ImageBase = (VOID *)(UINTN)DriverEntry->ImageBuffer;
+ DriverEntry->SmmLoadedImage.ImageSize = ImageContext.ImageSize;
+ DriverEntry->SmmLoadedImage.ImageCodeType = EfiRuntimeServicesCode;
+ DriverEntry->SmmLoadedImage.ImageDataType = EfiRuntimeServicesData;
+
+ //
// Create a new image handle in the UEFI handle database for the SMM Driver
//
DriverEntry->ImageHandle = NULL;
@@ -608,6 +632,17 @@ SmmLoadImage (
NULL
);
+ //
+ // Create a new image handle in the SMM handle database for the SMM Driver
+ //
+ DriverEntry->SmmImageHandle = NULL;
+ Status = SmmInstallProtocolInterface (
+ &DriverEntry->SmmImageHandle,
+ &gEfiLoadedImageProtocolGuid,
+ EFI_NATIVE_INTERFACE,
+ &DriverEntry->SmmLoadedImage
+ );
+
PERF_START (DriverEntry->ImageHandle, "LoadImage:", NULL, Tick);
PERF_END (DriverEntry->ImageHandle, "LoadImage:", NULL, 0);
@@ -896,6 +931,16 @@ SmmDispatcher (
}
gBS->FreePool (DriverEntry->LoadedImage);
}
+ Status = SmmUninstallProtocolInterface (
+ DriverEntry->SmmImageHandle,
+ &gEfiLoadedImageProtocolGuid,
+ &DriverEntry->SmmLoadedImage
+ );
+ if (!EFI_ERROR(Status)) {
+ if (DriverEntry->SmmLoadedImage.FilePath != NULL) {
+ SmmFreePool (DriverEntry->SmmLoadedImage.FilePath);
+ }
+ }
}
REPORT_STATUS_CODE_WITH_EXTENDED_DATA (
@@ -1327,6 +1372,27 @@ SmmDriverDispatchHandler (
mSmmCoreLoadedImage->DeviceHandle = FvHandle;
}
+ if (mSmmCoreDriverEntry->SmmLoadedImage.FilePath == NULL) {
+ //
+ // Maybe one special FV contains only one SMM_CORE module, so its device path must
+ // be initialized completely.
+ //
+ EfiInitializeFwVolDevicepathNode (&mFvDevicePath.File, &NameGuid);
+ SetDevicePathEndNode (&mFvDevicePath.End);
+
+ //
+ // Make a buffer copy of FilePath
+ //
+ Status = SmmAllocatePool (
+ EfiRuntimeServicesData,
+ GetDevicePathSize ((EFI_DEVICE_PATH_PROTOCOL *)&mFvDevicePath),
+ (VOID **)&mSmmCoreDriverEntry->SmmLoadedImage.FilePath
+ );
+ ASSERT_EFI_ERROR (Status);
+ CopyMem (mSmmCoreDriverEntry->SmmLoadedImage.FilePath, &mFvDevicePath, GetDevicePathSize((EFI_DEVICE_PATH_PROTOCOL *)&mFvDevicePath));
+
+ mSmmCoreDriverEntry->SmmLoadedImage.DeviceHandle = FvHandle;
+ }
} else {
SmmAddToDriverList (Fv, FvHandle, &NameGuid);
}
diff --git a/MdeModulePkg/Core/PiSmmCore/MemoryAttributesTable.c b/MdeModulePkg/Core/PiSmmCore/MemoryAttributesTable.c
new file mode 100644
index 0000000..3a5a2c8
--- /dev/null
+++ b/MdeModulePkg/Core/PiSmmCore/MemoryAttributesTable.c
@@ -0,0 +1,1509 @@
+/** @file
+ PI SMM MemoryAttributes support
+
+Copyright (c) 2016, Intel Corporation. All rights reserved.<BR>
+This program and the accompanying materials
+are licensed and made available under the terms and conditions of the BSD License
+which accompanies this distribution. The full text of the license may be found at
+http://opensource.org/licenses/bsd-license.php
+
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include <PiDxe.h>
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/UefiBootServicesTableLib.h>
+#include <Library/SmmServicesTableLib.h>
+#include <Library/DebugLib.h>
+#include <Library/PcdLib.h>
+
+#include <Library/PeCoffLib.h>
+#include <Library/PeCoffGetEntryPointLib.h>
+
+#include <Guid/PiSmmMemoryAttributesTable.h>
+
+#include "PiSmmCore.h"
+
+#define PREVIOUS_MEMORY_DESCRIPTOR(MemoryDescriptor, Size) \
+ ((EFI_MEMORY_DESCRIPTOR *)((UINT8 *)(MemoryDescriptor) - (Size)))
+
+#define IMAGE_PROPERTIES_RECORD_CODE_SECTION_SIGNATURE SIGNATURE_32 ('I','P','R','C')
+
+typedef struct {
+ UINT32 Signature;
+ LIST_ENTRY Link;
+ EFI_PHYSICAL_ADDRESS CodeSegmentBase;
+ UINT64 CodeSegmentSize;
+} IMAGE_PROPERTIES_RECORD_CODE_SECTION;
+
+#define IMAGE_PROPERTIES_RECORD_SIGNATURE SIGNATURE_32 ('I','P','R','D')
+
+typedef struct {
+ UINT32 Signature;
+ LIST_ENTRY Link;
+ EFI_PHYSICAL_ADDRESS ImageBase;
+ UINT64 ImageSize;
+ UINTN CodeSegmentCount;
+ LIST_ENTRY CodeSegmentList;
+} IMAGE_PROPERTIES_RECORD;
+
+#define IMAGE_PROPERTIES_PRIVATE_DATA_SIGNATURE SIGNATURE_32 ('I','P','P','D')
+
+typedef struct {
+ UINT32 Signature;
+ UINTN ImageRecordCount;
+ UINTN CodeSegmentCountMax;
+ LIST_ENTRY ImageRecordList;
+} IMAGE_PROPERTIES_PRIVATE_DATA;
+
+IMAGE_PROPERTIES_PRIVATE_DATA mImagePropertiesPrivateData = {
+ IMAGE_PROPERTIES_PRIVATE_DATA_SIGNATURE,
+ 0,
+ 0,
+ INITIALIZE_LIST_HEAD_VARIABLE (mImagePropertiesPrivateData.ImageRecordList)
+};
+
+#define EFI_MEMORY_ATTRIBUTES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA BIT0
+
+UINT64 mMemoryProtectionAttribute = EFI_MEMORY_ATTRIBUTES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA;
+
+//
+// Below functions are for MemoryMap
+//
+
+/**
+ Converts a number of EFI_PAGEs to a size in bytes.
+
+ NOTE: Do not use EFI_PAGES_TO_SIZE because it handles UINTN only.
+
+ @param[in] Pages The number of EFI_PAGES.
+
+ @return The number of bytes associated with the number of EFI_PAGEs specified
+ by Pages.
+**/
+STATIC
+UINT64
+EfiPagesToSize (
+ IN UINT64 Pages
+ )
+{
+ return LShiftU64 (Pages, EFI_PAGE_SHIFT);
+}
+
+/**
+ Converts a size, in bytes, to a number of EFI_PAGEs.
+
+ NOTE: Do not use EFI_SIZE_TO_PAGES because it handles UINTN only.
+
+ @param[in] Size A size in bytes.
+
+ @return The number of EFI_PAGEs associated with the number of bytes specified
+ by Size.
+
+**/
+STATIC
+UINT64
+EfiSizeToPages (
+ IN UINT64 Size
+ )
+{
+ return RShiftU64 (Size, EFI_PAGE_SHIFT) + ((((UINTN)Size) & EFI_PAGE_MASK) ? 1 : 0);
+}
+
+/**
+ Check the consistency of Smm memory attributes table.
+
+ @param[in] MemoryAttributesTable PI SMM memory attributes table
+**/
+VOID
+SmmMemoryAttributesTableConsistencyCheck (
+ IN EDKII_PI_SMM_MEMORY_ATTRIBUTES_TABLE *MemoryAttributesTable
+ )
+{
+ EFI_MEMORY_DESCRIPTOR *MemoryMap;
+ UINTN MemoryMapEntryCount;
+ UINTN DescriptorSize;
+ UINTN Index;
+ UINT64 Address;
+
+ Address = 0;
+ MemoryMapEntryCount = MemoryAttributesTable->NumberOfEntries;
+ DescriptorSize = MemoryAttributesTable->DescriptorSize;
+ MemoryMap = (EFI_MEMORY_DESCRIPTOR *)(MemoryAttributesTable + 1);
+ for (Index = 0; Index < MemoryMapEntryCount; Index++) {
+ if (Address != 0) {
+ ASSERT (Address == MemoryMap->PhysicalStart);
+ }
+ Address = MemoryMap->PhysicalStart + EFI_PAGES_TO_SIZE(MemoryMap->NumberOfPages);
+ MemoryMap = NEXT_MEMORY_DESCRIPTOR(MemoryMap, DescriptorSize);
+ }
+}
+
+/**
+ Sort memory map entries based upon PhysicalStart, from low to high.
+
+ @param[in] MemoryMap A pointer to the buffer in which firmware places
+ the current memory map.
+ @param[in] MemoryMapSize Size, in bytes, of the MemoryMap buffer.
+ @param[in] DescriptorSize Size, in bytes, of an individual EFI_MEMORY_DESCRIPTOR.
+**/
+STATIC
+VOID
+SortMemoryMap (
+ IN OUT EFI_MEMORY_DESCRIPTOR *MemoryMap,
+ IN UINTN MemoryMapSize,
+ IN UINTN DescriptorSize
+ )
+{
+ EFI_MEMORY_DESCRIPTOR *MemoryMapEntry;
+ EFI_MEMORY_DESCRIPTOR *NextMemoryMapEntry;
+ EFI_MEMORY_DESCRIPTOR *MemoryMapEnd;
+ EFI_MEMORY_DESCRIPTOR TempMemoryMap;
+
+ MemoryMapEntry = MemoryMap;
+ NextMemoryMapEntry = NEXT_MEMORY_DESCRIPTOR (MemoryMapEntry, DescriptorSize);
+ MemoryMapEnd = (EFI_MEMORY_DESCRIPTOR *) ((UINT8 *) MemoryMap + MemoryMapSize);
+ while (MemoryMapEntry < MemoryMapEnd) {
+ while (NextMemoryMapEntry < MemoryMapEnd) {
+ if (MemoryMapEntry->PhysicalStart > NextMemoryMapEntry->PhysicalStart) {
+ CopyMem (&TempMemoryMap, MemoryMapEntry, sizeof(EFI_MEMORY_DESCRIPTOR));
+ CopyMem (MemoryMapEntry, NextMemoryMapEntry, sizeof(EFI_MEMORY_DESCRIPTOR));
+ CopyMem (NextMemoryMapEntry, &TempMemoryMap, sizeof(EFI_MEMORY_DESCRIPTOR));
+ }
+
+ NextMemoryMapEntry = NEXT_MEMORY_DESCRIPTOR (NextMemoryMapEntry, DescriptorSize);
+ }
+
+ MemoryMapEntry = NEXT_MEMORY_DESCRIPTOR (MemoryMapEntry, DescriptorSize);
+ NextMemoryMapEntry = NEXT_MEMORY_DESCRIPTOR (MemoryMapEntry, DescriptorSize);
+ }
+
+ return ;
+}
+
+/**
+ Merge contiguous memory map entries that have the same attributes.
+
+ @param[in, out] MemoryMap A pointer to the buffer in which firmware places
+ the current memory map.
+ @param[in, out] MemoryMapSize A pointer to the size, in bytes, of the
+ MemoryMap buffer. On input, this is the size of
+ the current memory map. On output,
+ it is the size of new memory map after merge.
+ @param[in] DescriptorSize Size, in bytes, of an individual EFI_MEMORY_DESCRIPTOR.
+**/
+STATIC
+VOID
+MergeMemoryMap (
+ IN OUT EFI_MEMORY_DESCRIPTOR *MemoryMap,
+ IN OUT UINTN *MemoryMapSize,
+ IN UINTN DescriptorSize
+ )
+{
+ EFI_MEMORY_DESCRIPTOR *MemoryMapEntry;
+ EFI_MEMORY_DESCRIPTOR *MemoryMapEnd;
+ UINT64 MemoryBlockLength;
+ EFI_MEMORY_DESCRIPTOR *NewMemoryMapEntry;
+ EFI_MEMORY_DESCRIPTOR *NextMemoryMapEntry;
+
+ MemoryMapEntry = MemoryMap;
+ NewMemoryMapEntry = MemoryMap;
+ MemoryMapEnd = (EFI_MEMORY_DESCRIPTOR *) ((UINT8 *) MemoryMap + *MemoryMapSize);
+ while ((UINTN)MemoryMapEntry < (UINTN)MemoryMapEnd) {
+ CopyMem (NewMemoryMapEntry, MemoryMapEntry, sizeof(EFI_MEMORY_DESCRIPTOR));
+ NextMemoryMapEntry = NEXT_MEMORY_DESCRIPTOR (MemoryMapEntry, DescriptorSize);
+
+ do {
+ MemoryBlockLength = (UINT64) (EfiPagesToSize (MemoryMapEntry->NumberOfPages));
+ if (((UINTN)NextMemoryMapEntry < (UINTN)MemoryMapEnd) &&
+ (MemoryMapEntry->Type == NextMemoryMapEntry->Type) &&
+ (MemoryMapEntry->Attribute == NextMemoryMapEntry->Attribute) &&
+ ((MemoryMapEntry->PhysicalStart + MemoryBlockLength) == NextMemoryMapEntry->PhysicalStart)) {
+ MemoryMapEntry->NumberOfPages += NextMemoryMapEntry->NumberOfPages;
+ if (NewMemoryMapEntry != MemoryMapEntry) {
+ NewMemoryMapEntry->NumberOfPages += NextMemoryMapEntry->NumberOfPages;
+ }
+
+ NextMemoryMapEntry = NEXT_MEMORY_DESCRIPTOR (NextMemoryMapEntry, DescriptorSize);
+ continue;
+ } else {
+ MemoryMapEntry = PREVIOUS_MEMORY_DESCRIPTOR (NextMemoryMapEntry, DescriptorSize);
+ break;
+ }
+ } while (TRUE);
+
+ MemoryMapEntry = NEXT_MEMORY_DESCRIPTOR (MemoryMapEntry, DescriptorSize);
+ NewMemoryMapEntry = NEXT_MEMORY_DESCRIPTOR (NewMemoryMapEntry, DescriptorSize);
+ }
+
+ *MemoryMapSize = (UINTN)NewMemoryMapEntry - (UINTN)MemoryMap;
+
+ return ;
+}
+
+/**
+ Enforce memory map attributes.
+ This function will set EfiRuntimeServicesCode to be EFI_MEMORY_RO and EfiRuntimeServicesData to be EFI_MEMORY_XP.
+
+ @param[in, out] MemoryMap A pointer to the buffer in which firmware places
+ the current memory map.
+ @param[in] MemoryMapSize Size, in bytes, of the MemoryMap buffer.
+ @param[in] DescriptorSize Size, in bytes, of an individual EFI_MEMORY_DESCRIPTOR.
+**/
+STATIC
+VOID
+EnforceMemoryMapAttribute (
+ IN OUT EFI_MEMORY_DESCRIPTOR *MemoryMap,
+ IN UINTN MemoryMapSize,
+ IN UINTN DescriptorSize
+ )
+{
+ EFI_MEMORY_DESCRIPTOR *MemoryMapEntry;
+ EFI_MEMORY_DESCRIPTOR *MemoryMapEnd;
+
+ MemoryMapEntry = MemoryMap;
+ MemoryMapEnd = (EFI_MEMORY_DESCRIPTOR *) ((UINT8 *) MemoryMap + MemoryMapSize);
+ while ((UINTN)MemoryMapEntry < (UINTN)MemoryMapEnd) {
+ switch (MemoryMapEntry->Type) {
+ case EfiRuntimeServicesCode:
+ MemoryMapEntry->Attribute |= EFI_MEMORY_RO;
+ break;
+ case EfiRuntimeServicesData:
+ MemoryMapEntry->Attribute |= EFI_MEMORY_XP;
+ break;
+ }
+
+ MemoryMapEntry = NEXT_MEMORY_DESCRIPTOR (MemoryMapEntry, DescriptorSize);
+ }
+
+ return ;
+}
+
+/**
+ Return the first image record whose [ImageBase, ImageSize] is covered by [Buffer, Length].
+
+ @param[in] Buffer Start Address
+ @param[in] Length Address length
+
+ @return first image record covered by [buffer, length]
+**/
+STATIC
+IMAGE_PROPERTIES_RECORD *
+GetImageRecordByAddress (
+ IN EFI_PHYSICAL_ADDRESS Buffer,
+ IN UINT64 Length
+ )
+{
+ IMAGE_PROPERTIES_RECORD *ImageRecord;
+ LIST_ENTRY *ImageRecordLink;
+ LIST_ENTRY *ImageRecordList;
+
+ ImageRecordList = &mImagePropertiesPrivateData.ImageRecordList;
+
+ for (ImageRecordLink = ImageRecordList->ForwardLink;
+ ImageRecordLink != ImageRecordList;
+ ImageRecordLink = ImageRecordLink->ForwardLink) {
+ ImageRecord = CR (
+ ImageRecordLink,
+ IMAGE_PROPERTIES_RECORD,
+ Link,
+ IMAGE_PROPERTIES_RECORD_SIGNATURE
+ );
+
+ if ((Buffer <= ImageRecord->ImageBase) &&
+ (Buffer + Length >= ImageRecord->ImageBase + ImageRecord->ImageSize)) {
+ return ImageRecord;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ Set the memory map to new entries, according to one old entry,
+ based upon PE code section and data section in image record
+
+ @param[in] ImageRecord An image record whose [ImageBase, ImageSize] covered
+ by old memory map entry.
+ @param[in, out] NewRecord A pointer to several new memory map entries.
+ The caller must guarantee the buffer size is 1 +
+ (SplitRecordCount * DescriptorSize) calculated
+ below.
+ @param[in] OldRecord A pointer to one old memory map entry.
+ @param[in] DescriptorSize Size, in bytes, of an individual EFI_MEMORY_DESCRIPTOR.
+**/
+STATIC
+UINTN
+SetNewRecord (
+ IN IMAGE_PROPERTIES_RECORD *ImageRecord,
+ IN OUT EFI_MEMORY_DESCRIPTOR *NewRecord,
+ IN EFI_MEMORY_DESCRIPTOR *OldRecord,
+ IN UINTN DescriptorSize
+ )
+{
+ EFI_MEMORY_DESCRIPTOR TempRecord;
+ IMAGE_PROPERTIES_RECORD_CODE_SECTION *ImageRecordCodeSection;
+ LIST_ENTRY *ImageRecordCodeSectionLink;
+ LIST_ENTRY *ImageRecordCodeSectionEndLink;
+ LIST_ENTRY *ImageRecordCodeSectionList;
+ UINTN NewRecordCount;
+ UINT64 PhysicalEnd;
+ UINT64 ImageEnd;
+
+ CopyMem (&TempRecord, OldRecord, sizeof(EFI_MEMORY_DESCRIPTOR));
+ PhysicalEnd = TempRecord.PhysicalStart + EfiPagesToSize(TempRecord.NumberOfPages);
+ NewRecordCount = 0;
+
+ ImageRecordCodeSectionList = &ImageRecord->CodeSegmentList;
+
+ ImageRecordCodeSectionLink = ImageRecordCodeSectionList->ForwardLink;
+ ImageRecordCodeSectionEndLink = ImageRecordCodeSectionList;
+ while (ImageRecordCodeSectionLink != ImageRecordCodeSectionEndLink) {
+ ImageRecordCodeSection = CR (
+ ImageRecordCodeSectionLink,
+ IMAGE_PROPERTIES_RECORD_CODE_SECTION,
+ Link,
+ IMAGE_PROPERTIES_RECORD_CODE_SECTION_SIGNATURE
+ );
+ ImageRecordCodeSectionLink = ImageRecordCodeSectionLink->ForwardLink;
+
+ if (TempRecord.PhysicalStart <= ImageRecordCodeSection->CodeSegmentBase) {
+ //
+ // DATA
+ //
+ NewRecord->Type = EfiRuntimeServicesData;
+ NewRecord->PhysicalStart = TempRecord.PhysicalStart;
+ NewRecord->VirtualStart = 0;
+ NewRecord->NumberOfPages = EfiSizeToPages(ImageRecordCodeSection->CodeSegmentBase - NewRecord->PhysicalStart);
+ NewRecord->Attribute = TempRecord.Attribute | EFI_MEMORY_XP;
+ if (NewRecord->NumberOfPages != 0) {
+ NewRecord = NEXT_MEMORY_DESCRIPTOR (NewRecord, DescriptorSize);
+ NewRecordCount ++;
+ }
+
+ //
+ // CODE
+ //
+ NewRecord->Type = EfiRuntimeServicesCode;
+ NewRecord->PhysicalStart = ImageRecordCodeSection->CodeSegmentBase;
+ NewRecord->VirtualStart = 0;
+ NewRecord->NumberOfPages = EfiSizeToPages(ImageRecordCodeSection->CodeSegmentSize);
+ NewRecord->Attribute = (TempRecord.Attribute & (~EFI_MEMORY_XP)) | EFI_MEMORY_RO;
+ if (NewRecord->NumberOfPages != 0) {
+ NewRecord = NEXT_MEMORY_DESCRIPTOR (NewRecord, DescriptorSize);
+ NewRecordCount ++;
+ }
+
+ TempRecord.PhysicalStart = ImageRecordCodeSection->CodeSegmentBase + EfiPagesToSize (EfiSizeToPages(ImageRecordCodeSection->CodeSegmentSize));
+ TempRecord.NumberOfPages = EfiSizeToPages(PhysicalEnd - TempRecord.PhysicalStart);
+ if (TempRecord.NumberOfPages == 0) {
+ break;
+ }
+ }
+ }
+
+ ImageEnd = ImageRecord->ImageBase + ImageRecord->ImageSize;
+
+ //
+ // Final DATA
+ //
+ if (TempRecord.PhysicalStart < ImageEnd) {
+ NewRecord->Type = EfiRuntimeServicesData;
+ NewRecord->PhysicalStart = TempRecord.PhysicalStart;
+ NewRecord->VirtualStart = 0;
+ NewRecord->NumberOfPages = EfiSizeToPages (ImageEnd - TempRecord.PhysicalStart);
+ NewRecord->Attribute = TempRecord.Attribute | EFI_MEMORY_XP;
+ NewRecordCount ++;
+ }
+
+ return NewRecordCount;
+}
+
+/**
+ Return the maximum number of new split entries, according to one old entry,
+ based upon PE code section and data section.
+
+ @param[in] OldRecord A pointer to one old memory map entry.
+
+ @retval 0 no entry needs to be split.
+ @return the maximum number of new split entries
+**/
+STATIC
+UINTN
+GetMaxSplitRecordCount (
+ IN EFI_MEMORY_DESCRIPTOR *OldRecord
+ )
+{
+ IMAGE_PROPERTIES_RECORD *ImageRecord;
+ UINTN SplitRecordCount;
+ UINT64 PhysicalStart;
+ UINT64 PhysicalEnd;
+
+ SplitRecordCount = 0;
+ PhysicalStart = OldRecord->PhysicalStart;
+ PhysicalEnd = OldRecord->PhysicalStart + EfiPagesToSize(OldRecord->NumberOfPages);
+
+ do {
+ ImageRecord = GetImageRecordByAddress (PhysicalStart, PhysicalEnd - PhysicalStart);
+ if (ImageRecord == NULL) {
+ break;
+ }
+ SplitRecordCount += (2 * ImageRecord->CodeSegmentCount + 1);
+ PhysicalStart = ImageRecord->ImageBase + ImageRecord->ImageSize;
+ } while ((ImageRecord != NULL) && (PhysicalStart < PhysicalEnd));
+
+ if (SplitRecordCount != 0) {
+ SplitRecordCount--;
+ }
+
+ return SplitRecordCount;
+}
+
+/**
+ Split the memory map to new entries, according to one old entry,
+ based upon PE code section and data section.
+
+ @param[in] OldRecord A pointer to one old memory map entry.
+ @param[in, out] NewRecord A pointer to several new memory map entries.
+ The caller must guarantee the buffer size is 1 +
+ (SplitRecordCount * DescriptorSize) calculated
+ below.
+ @param[in] MaxSplitRecordCount The maximum number of split entries
+ @param[in] DescriptorSize Size, in bytes, of an individual EFI_MEMORY_DESCRIPTOR.
+
+ @retval 0 no entry is split.
+ @return the real number of split records.
+**/
+STATIC
+UINTN
+SplitRecord (
+ IN EFI_MEMORY_DESCRIPTOR *OldRecord,
+ IN OUT EFI_MEMORY_DESCRIPTOR *NewRecord,
+ IN UINTN MaxSplitRecordCount,
+ IN UINTN DescriptorSize
+ )
+{
+ EFI_MEMORY_DESCRIPTOR TempRecord;
+ IMAGE_PROPERTIES_RECORD *ImageRecord;
+ IMAGE_PROPERTIES_RECORD *NewImageRecord;
+ UINT64 PhysicalStart;
+ UINT64 PhysicalEnd;
+ UINTN NewRecordCount;
+ UINTN TotalNewRecordCount;
+
+ if (MaxSplitRecordCount == 0) {
+ CopyMem (NewRecord, OldRecord, DescriptorSize);
+ return 0;
+ }
+
+ TotalNewRecordCount = 0;
+
+ //
+ // Override previous record
+ //
+ CopyMem (&TempRecord, OldRecord, sizeof(EFI_MEMORY_DESCRIPTOR));
+ PhysicalStart = TempRecord.PhysicalStart;
+ PhysicalEnd = TempRecord.PhysicalStart + EfiPagesToSize(TempRecord.NumberOfPages);
+
+ ImageRecord = NULL;
+ do {
+ NewImageRecord = GetImageRecordByAddress (PhysicalStart, PhysicalEnd - PhysicalStart);
+ if (NewImageRecord == NULL) {
+ //
+ // No more image covered by this range, stop
+ //
+ if ((PhysicalEnd > PhysicalStart) && (ImageRecord != NULL)) {
+ //
+ // If there is still an address range left in this record, it needs to be recorded.
+ //
+ NewRecord = PREVIOUS_MEMORY_DESCRIPTOR (NewRecord, DescriptorSize);
+ if (NewRecord->Type == EfiRuntimeServicesData) {
+ //
+ // Last record is DATA, just merge it.
+ //
+ NewRecord->NumberOfPages = EfiSizeToPages(PhysicalEnd - NewRecord->PhysicalStart);
+ } else {
+ //
+ // Last record is CODE, create a new DATA entry.
+ //
+ NewRecord = NEXT_MEMORY_DESCRIPTOR (NewRecord, DescriptorSize);
+ NewRecord->Type = EfiRuntimeServicesData;
+ NewRecord->PhysicalStart = TempRecord.PhysicalStart;
+ NewRecord->VirtualStart = 0;
+ NewRecord->NumberOfPages = TempRecord.NumberOfPages;
+ NewRecord->Attribute = TempRecord.Attribute | EFI_MEMORY_XP;
+ TotalNewRecordCount ++;
+ }
+ }
+ break;
+ }
+ ImageRecord = NewImageRecord;
+
+ //
+ // Set new record
+ //
+ NewRecordCount = SetNewRecord (ImageRecord, NewRecord, &TempRecord, DescriptorSize);
+ TotalNewRecordCount += NewRecordCount;
+ NewRecord = (EFI_MEMORY_DESCRIPTOR *)((UINT8 *)NewRecord + NewRecordCount * DescriptorSize);
+
+ //
+ // Update PhysicalStart, in order to exclude the image buffer already split.
+ //
+ PhysicalStart = ImageRecord->ImageBase + ImageRecord->ImageSize;
+ TempRecord.PhysicalStart = PhysicalStart;
+ TempRecord.NumberOfPages = EfiSizeToPages (PhysicalEnd - PhysicalStart);
+ } while ((ImageRecord != NULL) && (PhysicalStart < PhysicalEnd));
+
+ return TotalNewRecordCount - 1;
+}
+
+/**
+ Split the original memory map, and add more entries to describe PE code section and data section.
+ This function will set EfiRuntimeServicesData to be EFI_MEMORY_XP.
+ This function will merge entries with same attributes finally.
+
+ NOTE: It assumes the PE code/data sections are page aligned.
+ NOTE: It assumes enough entries are prepared for the new memory map.
+
+ Split table:
+ +---------------+
+ | Record X |
+ +---------------+
+ | Record RtCode |
+ +---------------+
+ | Record Y |
+ +---------------+
+ ==>
+ +---------------+
+ | Record X |
+ +---------------+ ----
+ | Record RtData | |
+ +---------------+ |
+ | Record RtCode | |-> PE/COFF1
+ +---------------+ |
+ | Record RtData | |
+ +---------------+ ----
+ | Record RtData | |
+ +---------------+ |
+ | Record RtCode | |-> PE/COFF2
+ +---------------+ |
+ | Record RtData | |
+ +---------------+ ----
+ | Record Y |
+ +---------------+
+
+ @param[in, out] MemoryMapSize A pointer to the size, in bytes, of the
+ MemoryMap buffer. On input, this is the size of
+ old MemoryMap before split. The actual buffer
+ size of MemoryMap is MemoryMapSize +
+ (AdditionalRecordCount * DescriptorSize) calculated
+ below. On output, it is the size of new MemoryMap
+ after split.
+ @param[in, out] MemoryMap A pointer to the buffer in which firmware places
+ the current memory map.
+ @param[in] DescriptorSize Size, in bytes, of an individual EFI_MEMORY_DESCRIPTOR.
+**/
+STATIC
+VOID
+SplitTable (
+ IN OUT UINTN *MemoryMapSize,
+ IN OUT EFI_MEMORY_DESCRIPTOR *MemoryMap,
+ IN UINTN DescriptorSize
+ )
+{
+ INTN IndexOld;
+ INTN IndexNew;
+ UINTN MaxSplitRecordCount;
+ UINTN RealSplitRecordCount;
+ UINTN TotalSplitRecordCount;
+ UINTN AdditionalRecordCount;
+
+ AdditionalRecordCount = (2 * mImagePropertiesPrivateData.CodeSegmentCountMax + 1) * mImagePropertiesPrivateData.ImageRecordCount;
+
+ TotalSplitRecordCount = 0;
+ //
+ // Let old record point to end of valid MemoryMap buffer.
+ //
+ IndexOld = ((*MemoryMapSize) / DescriptorSize) - 1;
+ //
+ // Let new record point to end of full MemoryMap buffer.
+ //
+ IndexNew = ((*MemoryMapSize) / DescriptorSize) - 1 + AdditionalRecordCount;
+ for (; IndexOld >= 0; IndexOld--) {
+ MaxSplitRecordCount = GetMaxSplitRecordCount ((EFI_MEMORY_DESCRIPTOR *)((UINT8 *)MemoryMap + IndexOld * DescriptorSize));
+ //
+ // Split this MemoryMap record
+ //
+ IndexNew -= MaxSplitRecordCount;
+ RealSplitRecordCount = SplitRecord (
+ (EFI_MEMORY_DESCRIPTOR *)((UINT8 *)MemoryMap + IndexOld * DescriptorSize),
+ (EFI_MEMORY_DESCRIPTOR *)((UINT8 *)MemoryMap + IndexNew * DescriptorSize),
+ MaxSplitRecordCount,
+ DescriptorSize
+ );
+ //
+ // Adjust IndexNew according to real split.
+ //
+ CopyMem (
+ ((UINT8 *)MemoryMap + (IndexNew + MaxSplitRecordCount - RealSplitRecordCount) * DescriptorSize),
+ ((UINT8 *)MemoryMap + IndexNew * DescriptorSize),
+ RealSplitRecordCount * DescriptorSize
+ );
+ IndexNew = IndexNew + MaxSplitRecordCount - RealSplitRecordCount;
+ TotalSplitRecordCount += RealSplitRecordCount;
+ IndexNew --;
+ }
+ //
+ // Move all records to the beginning.
+ //
+ CopyMem (
+ MemoryMap,
+ (UINT8 *)MemoryMap + (AdditionalRecordCount - TotalSplitRecordCount) * DescriptorSize,
+ (*MemoryMapSize) + TotalSplitRecordCount * DescriptorSize
+ );
+
+ *MemoryMapSize = (*MemoryMapSize) + DescriptorSize * TotalSplitRecordCount;
+
+ //
+ // Sort from low to high (Just in case)
+ //
+ SortMemoryMap (MemoryMap, *MemoryMapSize, DescriptorSize);
+
+ //
+ // Set RuntimeData to XP
+ //
+ EnforceMemoryMapAttribute (MemoryMap, *MemoryMapSize, DescriptorSize);
+
+ //
+ // Merge same type to save entry size
+ //
+ MergeMemoryMap (MemoryMap, MemoryMapSize, DescriptorSize);
+
+ return ;
+}
+
+/**
+ This function is a wrapper of GetMemoryMap() that supports the memory attributes table.
+
+ It calls the original GetMemoryMap() to get the original memory map information, and
+ then adds the additional memory map entries needed for PE code/data separation.
+
+ @param[in, out] MemoryMapSize A pointer to the size, in bytes, of the
+ MemoryMap buffer. On input, this is the size of
+ the buffer allocated by the caller. On output,
+ it is the size of the buffer returned by the
+ firmware if the buffer was large enough, or the
+ size of the buffer needed to contain the map if
+ the buffer was too small.
+ @param[in, out] MemoryMap A pointer to the buffer in which firmware places
+ the current memory map.
+ @param[out] MapKey A pointer to the location in which firmware
+ returns the key for the current memory map.
+ @param[out] DescriptorSize A pointer to the location in which firmware
+ returns the size, in bytes, of an individual
+ EFI_MEMORY_DESCRIPTOR.
+ @param[out] DescriptorVersion A pointer to the location in which firmware
+ returns the version number associated with the
+ EFI_MEMORY_DESCRIPTOR.
+
+ @retval EFI_SUCCESS The memory map was returned in the MemoryMap
+ buffer.
+ @retval EFI_BUFFER_TOO_SMALL The MemoryMap buffer was too small. The current
+ buffer size needed to hold the memory map is
+ returned in MemoryMapSize.
+ @retval EFI_INVALID_PARAMETER One of the parameters has an invalid value.
+
+**/
+STATIC
+EFI_STATUS
+EFIAPI
+SmmCoreGetMemoryMapMemoryAttributesTable (
+ IN OUT UINTN *MemoryMapSize,
+ IN OUT EFI_MEMORY_DESCRIPTOR *MemoryMap,
+ OUT UINTN *MapKey,
+ OUT UINTN *DescriptorSize,
+ OUT UINT32 *DescriptorVersion
+ )
+{
+ EFI_STATUS Status;
+ UINTN OldMemoryMapSize;
+ UINTN AdditionalRecordCount;
+
+ //
+ // If PE code/data is not aligned, just return.
+ //
+ if ((mMemoryProtectionAttribute & EFI_MEMORY_ATTRIBUTES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA) == 0) {
+ return SmmCoreGetMemoryMap (MemoryMapSize, MemoryMap, MapKey, DescriptorSize, DescriptorVersion);
+ }
+
+ if (MemoryMapSize == NULL) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ AdditionalRecordCount = (2 * mImagePropertiesPrivateData.CodeSegmentCountMax + 1) * mImagePropertiesPrivateData.ImageRecordCount;
+
+ OldMemoryMapSize = *MemoryMapSize;
+ Status = SmmCoreGetMemoryMap (MemoryMapSize, MemoryMap, MapKey, DescriptorSize, DescriptorVersion);
+ if (Status == EFI_BUFFER_TOO_SMALL) {
+ *MemoryMapSize = *MemoryMapSize + (*DescriptorSize) * AdditionalRecordCount;
+ } else if (Status == EFI_SUCCESS) {
+ if (OldMemoryMapSize - *MemoryMapSize < (*DescriptorSize) * AdditionalRecordCount) {
+ *MemoryMapSize = *MemoryMapSize + (*DescriptorSize) * AdditionalRecordCount;
+ //
+ // Need update status to buffer too small
+ //
+ Status = EFI_BUFFER_TOO_SMALL;
+ } else {
+ //
+ // Split PE code/data
+ //
+ ASSERT(MemoryMap != NULL);
+ SplitTable (MemoryMapSize, MemoryMap, *DescriptorSize);
+ }
+ }
+
+ return Status;
+}
+
+//
+// Below functions are for ImageRecord
+//
+
+/**
+ Set MemoryProtectionAttribute according to PE/COFF image section alignment.
+
+ @param[in] SectionAlignment PE/COFF section alignment
+**/
+STATIC
+VOID
+SetMemoryAttributesTableSectionAlignment (
+ IN UINT32 SectionAlignment
+ )
+{
+ if (((SectionAlignment & (EFI_ACPI_RUNTIME_PAGE_ALLOCATION_ALIGNMENT - 1)) != 0) &&
+ ((mMemoryProtectionAttribute & EFI_MEMORY_ATTRIBUTES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA) != 0)) {
+ DEBUG ((DEBUG_VERBOSE, "SMM SetMemoryAttributesTableSectionAlignment - Clear\n"));
+ mMemoryProtectionAttribute &= ~((UINT64)EFI_MEMORY_ATTRIBUTES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA);
+ }
+}
+
+/**
+ Swap two code sections in image record.
+
+ @param[in] FirstImageRecordCodeSection first code section in image record
+ @param[in] SecondImageRecordCodeSection second code section in image record
+**/
+STATIC
+VOID
+SwapImageRecordCodeSection (
+ IN IMAGE_PROPERTIES_RECORD_CODE_SECTION *FirstImageRecordCodeSection,
+ IN IMAGE_PROPERTIES_RECORD_CODE_SECTION *SecondImageRecordCodeSection
+ )
+{
+ IMAGE_PROPERTIES_RECORD_CODE_SECTION TempImageRecordCodeSection;
+
+ TempImageRecordCodeSection.CodeSegmentBase = FirstImageRecordCodeSection->CodeSegmentBase;
+ TempImageRecordCodeSection.CodeSegmentSize = FirstImageRecordCodeSection->CodeSegmentSize;
+
+ FirstImageRecordCodeSection->CodeSegmentBase = SecondImageRecordCodeSection->CodeSegmentBase;
+ FirstImageRecordCodeSection->CodeSegmentSize = SecondImageRecordCodeSection->CodeSegmentSize;
+
+ SecondImageRecordCodeSection->CodeSegmentBase = TempImageRecordCodeSection.CodeSegmentBase;
+ SecondImageRecordCodeSection->CodeSegmentSize = TempImageRecordCodeSection.CodeSegmentSize;
+}
+
+/**
+ Sort code section in image record, based upon CodeSegmentBase from low to high.
+
+ @param[in] ImageRecord image record to be sorted
+**/
+STATIC
+VOID
+SortImageRecordCodeSection (
+ IN IMAGE_PROPERTIES_RECORD *ImageRecord
+ )
+{
+ IMAGE_PROPERTIES_RECORD_CODE_SECTION *ImageRecordCodeSection;
+ IMAGE_PROPERTIES_RECORD_CODE_SECTION *NextImageRecordCodeSection;
+ LIST_ENTRY *ImageRecordCodeSectionLink;
+ LIST_ENTRY *NextImageRecordCodeSectionLink;
+ LIST_ENTRY *ImageRecordCodeSectionEndLink;
+ LIST_ENTRY *ImageRecordCodeSectionList;
+
+ ImageRecordCodeSectionList = &ImageRecord->CodeSegmentList;
+
+ ImageRecordCodeSectionLink = ImageRecordCodeSectionList->ForwardLink;
+ NextImageRecordCodeSectionLink = ImageRecordCodeSectionLink->ForwardLink;
+ ImageRecordCodeSectionEndLink = ImageRecordCodeSectionList;
+ while (ImageRecordCodeSectionLink != ImageRecordCodeSectionEndLink) {
+ ImageRecordCodeSection = CR (
+ ImageRecordCodeSectionLink,
+ IMAGE_PROPERTIES_RECORD_CODE_SECTION,
+ Link,
+ IMAGE_PROPERTIES_RECORD_CODE_SECTION_SIGNATURE
+ );
+ while (NextImageRecordCodeSectionLink != ImageRecordCodeSectionEndLink) {
+ NextImageRecordCodeSection = CR (
+ NextImageRecordCodeSectionLink,
+ IMAGE_PROPERTIES_RECORD_CODE_SECTION,
+ Link,
+ IMAGE_PROPERTIES_RECORD_CODE_SECTION_SIGNATURE
+ );
+ if (ImageRecordCodeSection->CodeSegmentBase > NextImageRecordCodeSection->CodeSegmentBase) {
+ SwapImageRecordCodeSection (ImageRecordCodeSection, NextImageRecordCodeSection);
+ }
+ NextImageRecordCodeSectionLink = NextImageRecordCodeSectionLink->ForwardLink;
+ }
+
+ ImageRecordCodeSectionLink = ImageRecordCodeSectionLink->ForwardLink;
+ NextImageRecordCodeSectionLink = ImageRecordCodeSectionLink->ForwardLink;
+ }
+}
+
+/**
+ Check if code section in image record is valid.
+
+ @param[in] ImageRecord image record to be checked
+
+ @retval TRUE image record is valid
+ @retval FALSE image record is invalid
+**/
+STATIC
+BOOLEAN
+IsImageRecordCodeSectionValid (
+ IN IMAGE_PROPERTIES_RECORD *ImageRecord
+ )
+{
+ IMAGE_PROPERTIES_RECORD_CODE_SECTION *ImageRecordCodeSection;
+ IMAGE_PROPERTIES_RECORD_CODE_SECTION *LastImageRecordCodeSection;
+ LIST_ENTRY *ImageRecordCodeSectionLink;
+ LIST_ENTRY *ImageRecordCodeSectionEndLink;
+ LIST_ENTRY *ImageRecordCodeSectionList;
+
+ DEBUG ((DEBUG_VERBOSE, "SMM ImageCode SegmentCount - 0x%x\n", ImageRecord->CodeSegmentCount));
+
+ ImageRecordCodeSectionList = &ImageRecord->CodeSegmentList;
+
+ ImageRecordCodeSectionLink = ImageRecordCodeSectionList->ForwardLink;
+ ImageRecordCodeSectionEndLink = ImageRecordCodeSectionList;
+ LastImageRecordCodeSection = NULL;
+ while (ImageRecordCodeSectionLink != ImageRecordCodeSectionEndLink) {
+ ImageRecordCodeSection = CR (
+ ImageRecordCodeSectionLink,
+ IMAGE_PROPERTIES_RECORD_CODE_SECTION,
+ Link,
+ IMAGE_PROPERTIES_RECORD_CODE_SECTION_SIGNATURE
+ );
+ if (ImageRecordCodeSection->CodeSegmentSize == 0) {
+ return FALSE;
+ }
+ if (ImageRecordCodeSection->CodeSegmentBase < ImageRecord->ImageBase) {
+ return FALSE;
+ }
+ if (ImageRecordCodeSection->CodeSegmentBase >= MAX_ADDRESS - ImageRecordCodeSection->CodeSegmentSize) {
+ return FALSE;
+ }
+ if ((ImageRecordCodeSection->CodeSegmentBase + ImageRecordCodeSection->CodeSegmentSize) > (ImageRecord->ImageBase + ImageRecord->ImageSize)) {
+ return FALSE;
+ }
+ if (LastImageRecordCodeSection != NULL) {
+ if ((LastImageRecordCodeSection->CodeSegmentBase + LastImageRecordCodeSection->CodeSegmentSize) > ImageRecordCodeSection->CodeSegmentBase) {
+ return FALSE;
+ }
+ }
+
+ LastImageRecordCodeSection = ImageRecordCodeSection;
+ ImageRecordCodeSectionLink = ImageRecordCodeSectionLink->ForwardLink;
+ }
+
+ return TRUE;
+}
+
+/**
+ Swap two image records.
+
+ @param[in] FirstImageRecord first image record.
+ @param[in] SecondImageRecord second image record.
+**/
+STATIC
+VOID
+SwapImageRecord (
+ IN IMAGE_PROPERTIES_RECORD *FirstImageRecord,
+ IN IMAGE_PROPERTIES_RECORD *SecondImageRecord
+ )
+{
+ IMAGE_PROPERTIES_RECORD TempImageRecord;
+
+ TempImageRecord.ImageBase = FirstImageRecord->ImageBase;
+ TempImageRecord.ImageSize = FirstImageRecord->ImageSize;
+ TempImageRecord.CodeSegmentCount = FirstImageRecord->CodeSegmentCount;
+
+ FirstImageRecord->ImageBase = SecondImageRecord->ImageBase;
+ FirstImageRecord->ImageSize = SecondImageRecord->ImageSize;
+ FirstImageRecord->CodeSegmentCount = SecondImageRecord->CodeSegmentCount;
+
+ SecondImageRecord->ImageBase = TempImageRecord.ImageBase;
+ SecondImageRecord->ImageSize = TempImageRecord.ImageSize;
+ SecondImageRecord->CodeSegmentCount = TempImageRecord.CodeSegmentCount;
+
+ SwapListEntries (&FirstImageRecord->CodeSegmentList, &SecondImageRecord->CodeSegmentList);
+}
+
+/**
+ Sort image record based upon the ImageBase from low to high.
+**/
+STATIC
+VOID
+SortImageRecord (
+ VOID
+ )
+{
+ IMAGE_PROPERTIES_RECORD *ImageRecord;
+ IMAGE_PROPERTIES_RECORD *NextImageRecord;
+ LIST_ENTRY *ImageRecordLink;
+ LIST_ENTRY *NextImageRecordLink;
+ LIST_ENTRY *ImageRecordEndLink;
+ LIST_ENTRY *ImageRecordList;
+
+ ImageRecordList = &mImagePropertiesPrivateData.ImageRecordList;
+
+ ImageRecordLink = ImageRecordList->ForwardLink;
+ NextImageRecordLink = ImageRecordLink->ForwardLink;
+ ImageRecordEndLink = ImageRecordList;
+ while (ImageRecordLink != ImageRecordEndLink) {
+ ImageRecord = CR (
+ ImageRecordLink,
+ IMAGE_PROPERTIES_RECORD,
+ Link,
+ IMAGE_PROPERTIES_RECORD_SIGNATURE
+ );
+ while (NextImageRecordLink != ImageRecordEndLink) {
+ NextImageRecord = CR (
+ NextImageRecordLink,
+ IMAGE_PROPERTIES_RECORD,
+ Link,
+ IMAGE_PROPERTIES_RECORD_SIGNATURE
+ );
+ if (ImageRecord->ImageBase > NextImageRecord->ImageBase) {
+ SwapImageRecord (ImageRecord, NextImageRecord);
+ }
+ NextImageRecordLink = NextImageRecordLink->ForwardLink;
+ }
+
+ ImageRecordLink = ImageRecordLink->ForwardLink;
+ NextImageRecordLink = ImageRecordLink->ForwardLink;
+ }
+}
+
+/**
+ Dump image record.
+**/
+STATIC
+VOID
+DumpImageRecord (
+ VOID
+ )
+{
+ IMAGE_PROPERTIES_RECORD *ImageRecord;
+ LIST_ENTRY *ImageRecordLink;
+ LIST_ENTRY *ImageRecordList;
+ UINTN Index;
+
+ ImageRecordList = &mImagePropertiesPrivateData.ImageRecordList;
+
+ for (ImageRecordLink = ImageRecordList->ForwardLink, Index= 0;
+ ImageRecordLink != ImageRecordList;
+ ImageRecordLink = ImageRecordLink->ForwardLink, Index++) {
+ ImageRecord = CR (
+ ImageRecordLink,
+ IMAGE_PROPERTIES_RECORD,
+ Link,
+ IMAGE_PROPERTIES_RECORD_SIGNATURE
+ );
+ DEBUG ((DEBUG_VERBOSE, "SMM Image[%d]: 0x%016lx - 0x%016lx\n", Index, ImageRecord->ImageBase, ImageRecord->ImageSize));
+ }
+}
+
+/**
+ Insert image record.
+
+ @param[in] DriverEntry Driver information
+**/
+VOID
+SmmInsertImageRecord (
+ IN EFI_SMM_DRIVER_ENTRY *DriverEntry
+ )
+{
+ VOID *ImageAddress;
+ EFI_IMAGE_DOS_HEADER *DosHdr;
+ UINT32 PeCoffHeaderOffset;
+ UINT32 SectionAlignment;
+ EFI_IMAGE_SECTION_HEADER *Section;
+ EFI_IMAGE_OPTIONAL_HEADER_PTR_UNION Hdr;
+ UINT8 *Name;
+ UINTN Index;
+ IMAGE_PROPERTIES_RECORD *ImageRecord;
+ CHAR8 *PdbPointer;
+ IMAGE_PROPERTIES_RECORD_CODE_SECTION *ImageRecordCodeSection;
+ UINT16 Magic;
+
+ DEBUG ((DEBUG_VERBOSE, "SMM InsertImageRecord - 0x%x\n", DriverEntry));
+ DEBUG ((DEBUG_VERBOSE, "SMM InsertImageRecord - 0x%016lx - 0x%08x\n", DriverEntry->ImageBuffer, DriverEntry->NumberOfPage));
+
+ ImageRecord = AllocatePool (sizeof(*ImageRecord));
+ if (ImageRecord == NULL) {
+ return ;
+ }
+ ImageRecord->Signature = IMAGE_PROPERTIES_RECORD_SIGNATURE;
+
+ DEBUG ((DEBUG_VERBOSE, "SMM ImageRecordCount - 0x%x\n", mImagePropertiesPrivateData.ImageRecordCount));
+
+ //
+ // Step 1: record whole region
+ //
+ ImageRecord->ImageBase = DriverEntry->ImageBuffer;
+ ImageRecord->ImageSize = EFI_PAGES_TO_SIZE(DriverEntry->NumberOfPage);
+
+ ImageAddress = (VOID *)(UINTN)DriverEntry->ImageBuffer;
+
+ PdbPointer = PeCoffLoaderGetPdbPointer ((VOID*) (UINTN) ImageAddress);
+ if (PdbPointer != NULL) {
+ DEBUG ((DEBUG_VERBOSE, "SMM Image - %a\n", PdbPointer));
+ }
+
+ //
+ // Check PE/COFF image
+ //
+ DosHdr = (EFI_IMAGE_DOS_HEADER *) (UINTN) ImageAddress;
+ PeCoffHeaderOffset = 0;
+ if (DosHdr->e_magic == EFI_IMAGE_DOS_SIGNATURE) {
+ PeCoffHeaderOffset = DosHdr->e_lfanew;
+ }
+
+ Hdr.Pe32 = (EFI_IMAGE_NT_HEADERS32 *)((UINT8 *) (UINTN) ImageAddress + PeCoffHeaderOffset);
+ if (Hdr.Pe32->Signature != EFI_IMAGE_NT_SIGNATURE) {
+ DEBUG ((DEBUG_VERBOSE, "SMM Hdr.Pe32->Signature invalid - 0x%x\n", Hdr.Pe32->Signature));
+ goto Finish;
+ }
+
+ //
+ // Get SectionAlignment
+ //
+ if (Hdr.Pe32->FileHeader.Machine == IMAGE_FILE_MACHINE_IA64 && Hdr.Pe32->OptionalHeader.Magic == EFI_IMAGE_NT_OPTIONAL_HDR32_MAGIC) {
+ //
+ // NOTE: Some versions of Linux ELILO for Itanium have an incorrect magic value
+ // in the PE/COFF Header. If the MachineType is Itanium(IA64) and the
+ // Magic value in the OptionalHeader is EFI_IMAGE_NT_OPTIONAL_HDR32_MAGIC
+ // then override the magic value to EFI_IMAGE_NT_OPTIONAL_HDR64_MAGIC
+ //
+ Magic = EFI_IMAGE_NT_OPTIONAL_HDR64_MAGIC;
+ } else {
+ //
+ // Get the magic value from the PE/COFF Optional Header
+ //
+ Magic = Hdr.Pe32->OptionalHeader.Magic;
+ }
+ if (Magic == EFI_IMAGE_NT_OPTIONAL_HDR32_MAGIC) {
+ SectionAlignment = Hdr.Pe32->OptionalHeader.SectionAlignment;
+ } else {
+ SectionAlignment = Hdr.Pe32Plus->OptionalHeader.SectionAlignment;
+ }
+
+ SetMemoryAttributesTableSectionAlignment (SectionAlignment);
+ if ((SectionAlignment & (EFI_ACPI_RUNTIME_PAGE_ALLOCATION_ALIGNMENT - 1)) != 0) {
+ DEBUG ((DEBUG_ERROR, "SMM !!!!!!!! InsertImageRecord - Section Alignment(0x%x) is not %dK !!!!!!!!\n",
+ SectionAlignment, EFI_ACPI_RUNTIME_PAGE_ALLOCATION_ALIGNMENT >> 10));
+ PdbPointer = PeCoffLoaderGetPdbPointer ((VOID*) (UINTN) ImageAddress);
+ if (PdbPointer != NULL) {
+ DEBUG ((DEBUG_ERROR, "SMM !!!!!!!! Image - %a !!!!!!!!\n", PdbPointer));
+ }
+ goto Finish;
+ }
+
+ Section = (EFI_IMAGE_SECTION_HEADER *) (
+ (UINT8 *) (UINTN) ImageAddress +
+ PeCoffHeaderOffset +
+ sizeof(UINT32) +
+ sizeof(EFI_IMAGE_FILE_HEADER) +
+ Hdr.Pe32->FileHeader.SizeOfOptionalHeader
+ );
+ ImageRecord->CodeSegmentCount = 0;
+ InitializeListHead (&ImageRecord->CodeSegmentList);
+ for (Index = 0; Index < Hdr.Pe32->FileHeader.NumberOfSections; Index++) {
+ Name = Section[Index].Name;
+ DEBUG ((
+ DEBUG_VERBOSE,
+ "SMM Section - '%c%c%c%c%c%c%c%c'\n",
+ Name[0],
+ Name[1],
+ Name[2],
+ Name[3],
+ Name[4],
+ Name[5],
+ Name[6],
+ Name[7]
+ ));
+
+ if ((Section[Index].Characteristics & EFI_IMAGE_SCN_CNT_CODE) != 0) {
+ DEBUG ((DEBUG_VERBOSE, "SMM VirtualSize - 0x%08x\n", Section[Index].Misc.VirtualSize));
+ DEBUG ((DEBUG_VERBOSE, "SMM VirtualAddress - 0x%08x\n", Section[Index].VirtualAddress));
+ DEBUG ((DEBUG_VERBOSE, "SMM SizeOfRawData - 0x%08x\n", Section[Index].SizeOfRawData));
+ DEBUG ((DEBUG_VERBOSE, "SMM PointerToRawData - 0x%08x\n", Section[Index].PointerToRawData));
+ DEBUG ((DEBUG_VERBOSE, "SMM PointerToRelocations - 0x%08x\n", Section[Index].PointerToRelocations));
+ DEBUG ((DEBUG_VERBOSE, "SMM PointerToLinenumbers - 0x%08x\n", Section[Index].PointerToLinenumbers));
+ DEBUG ((DEBUG_VERBOSE, "SMM NumberOfRelocations - 0x%08x\n", Section[Index].NumberOfRelocations));
+ DEBUG ((DEBUG_VERBOSE, "SMM NumberOfLinenumbers - 0x%08x\n", Section[Index].NumberOfLinenumbers));
+ DEBUG ((DEBUG_VERBOSE, "SMM Characteristics - 0x%08x\n", Section[Index].Characteristics));
+
+ //
+ // Step 2: record code section
+ //
+ ImageRecordCodeSection = AllocatePool (sizeof(*ImageRecordCodeSection));
+ if (ImageRecordCodeSection == NULL) {
+ return ;
+ }
+ ImageRecordCodeSection->Signature = IMAGE_PROPERTIES_RECORD_CODE_SECTION_SIGNATURE;
+
+ ImageRecordCodeSection->CodeSegmentBase = (UINTN)ImageAddress + Section[Index].VirtualAddress;
+ ImageRecordCodeSection->CodeSegmentSize = Section[Index].SizeOfRawData;
+
+ DEBUG ((DEBUG_VERBOSE, "SMM ImageCode: 0x%016lx - 0x%016lx\n", ImageRecordCodeSection->CodeSegmentBase, ImageRecordCodeSection->CodeSegmentSize));
+
+ InsertTailList (&ImageRecord->CodeSegmentList, &ImageRecordCodeSection->Link);
+ ImageRecord->CodeSegmentCount++;
+ }
+ }
+
+ if (ImageRecord->CodeSegmentCount == 0) {
+ SetMemoryAttributesTableSectionAlignment (1);
+ DEBUG ((DEBUG_ERROR, "SMM !!!!!!!! InsertImageRecord - CodeSegmentCount is 0 !!!!!!!!\n"));
+ PdbPointer = PeCoffLoaderGetPdbPointer ((VOID*) (UINTN) ImageAddress);
+ if (PdbPointer != NULL) {
+ DEBUG ((DEBUG_ERROR, "SMM !!!!!!!! Image - %a !!!!!!!!\n", PdbPointer));
+ }
+ goto Finish;
+ }
+
+ //
+ // Final
+ //
+ SortImageRecordCodeSection (ImageRecord);
+ //
+ // Check that all code sections are within ImageBase/ImageSize and do not overlap
+ //
+ if (!IsImageRecordCodeSectionValid (ImageRecord)) {
+ DEBUG ((DEBUG_ERROR, "SMM IsImageRecordCodeSectionValid - FAIL\n"));
+ goto Finish;
+ }
+
+ InsertTailList (&mImagePropertiesPrivateData.ImageRecordList, &ImageRecord->Link);
+ mImagePropertiesPrivateData.ImageRecordCount++;
+
+ SortImageRecord ();
+
+ if (mImagePropertiesPrivateData.CodeSegmentCountMax < ImageRecord->CodeSegmentCount) {
+ mImagePropertiesPrivateData.CodeSegmentCountMax = ImageRecord->CodeSegmentCount;
+ }
+
+Finish:
+ return ;
+}
+
+/**
+ Find an image record according to image base and size.
+
+ @param[in] ImageBase Base of PE image
+ @param[in] ImageSize Size of PE image
+
+ @return image record
+**/
+STATIC
+IMAGE_PROPERTIES_RECORD *
+FindImageRecord (
+ IN EFI_PHYSICAL_ADDRESS ImageBase,
+ IN UINT64 ImageSize
+ )
+{
+ IMAGE_PROPERTIES_RECORD *ImageRecord;
+ LIST_ENTRY *ImageRecordLink;
+ LIST_ENTRY *ImageRecordList;
+
+ ImageRecordList = &mImagePropertiesPrivateData.ImageRecordList;
+
+ for (ImageRecordLink = ImageRecordList->ForwardLink;
+ ImageRecordLink != ImageRecordList;
+ ImageRecordLink = ImageRecordLink->ForwardLink) {
+ ImageRecord = CR (
+ ImageRecordLink,
+ IMAGE_PROPERTIES_RECORD,
+ Link,
+ IMAGE_PROPERTIES_RECORD_SIGNATURE
+ );
+
+ if ((ImageBase == ImageRecord->ImageBase) &&
+ (ImageSize == ImageRecord->ImageSize)) {
+ return ImageRecord;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ Remove Image record.
+
+ @param[in] DriverEntry Driver information
+**/
+VOID
+SmmRemoveImageRecord (
+ IN EFI_SMM_DRIVER_ENTRY *DriverEntry
+ )
+{
+ IMAGE_PROPERTIES_RECORD *ImageRecord;
+ LIST_ENTRY *CodeSegmentListHead;
+ IMAGE_PROPERTIES_RECORD_CODE_SECTION *ImageRecordCodeSection;
+
+ DEBUG ((DEBUG_VERBOSE, "SMM RemoveImageRecord - 0x%x\n", DriverEntry));
+ DEBUG ((DEBUG_VERBOSE, "SMM RemoveImageRecord - 0x%016lx - 0x%016lx\n", DriverEntry->ImageBuffer, DriverEntry->NumberOfPage));
+
+ ImageRecord = FindImageRecord (DriverEntry->ImageBuffer, EFI_PAGES_TO_SIZE(DriverEntry->NumberOfPage));
+ if (ImageRecord == NULL) {
+ DEBUG ((DEBUG_ERROR, "SMM !!!!!!!! ImageRecord not found !!!!!!!!\n"));
+ return ;
+ }
+
+ CodeSegmentListHead = &ImageRecord->CodeSegmentList;
+ while (!IsListEmpty (CodeSegmentListHead)) {
+ ImageRecordCodeSection = CR (
+ CodeSegmentListHead->ForwardLink,
+ IMAGE_PROPERTIES_RECORD_CODE_SECTION,
+ Link,
+ IMAGE_PROPERTIES_RECORD_CODE_SECTION_SIGNATURE
+ );
+ RemoveEntryList (&ImageRecordCodeSection->Link);
+ FreePool (ImageRecordCodeSection);
+ }
+
+ RemoveEntryList (&ImageRecord->Link);
+ FreePool (ImageRecord);
+ mImagePropertiesPrivateData.ImageRecordCount--;
+}
+
+/**
+ Publish MemoryAttributesTable to SMM configuration table.
+**/
+VOID
+PublishMemoryAttributesTable (
+ VOID
+ )
+{
+ UINTN MemoryMapSize;
+ EFI_MEMORY_DESCRIPTOR *MemoryMap;
+ UINTN MapKey;
+ UINTN DescriptorSize;
+ UINT32 DescriptorVersion;
+ UINTN Index;
+ EFI_STATUS Status;
+ UINTN RuntimeEntryCount;
+ EDKII_PI_SMM_MEMORY_ATTRIBUTES_TABLE *MemoryAttributesTable;
+ EFI_MEMORY_DESCRIPTOR *MemoryAttributesEntry;
+ UINTN MemoryAttributesTableSize;
+
+ MemoryMapSize = 0;
+ MemoryMap = NULL;
+ Status = SmmCoreGetMemoryMapMemoryAttributesTable (
+ &MemoryMapSize,
+ MemoryMap,
+ &MapKey,
+ &DescriptorSize,
+ &DescriptorVersion
+ );
+ ASSERT (Status == EFI_BUFFER_TOO_SMALL);
+
+ do {
+ DEBUG ((DEBUG_INFO, "MemoryMapSize - 0x%x\n", MemoryMapSize));
+ MemoryMap = AllocatePool (MemoryMapSize);
+ ASSERT (MemoryMap != NULL);
+ DEBUG ((DEBUG_INFO, "MemoryMap - 0x%x\n", MemoryMap));
+
+ Status = SmmCoreGetMemoryMapMemoryAttributesTable (
+ &MemoryMapSize,
+ MemoryMap,
+ &MapKey,
+ &DescriptorSize,
+ &DescriptorVersion
+ );
+ if (EFI_ERROR (Status)) {
+ FreePool (MemoryMap);
+ }
+ } while (Status == EFI_BUFFER_TOO_SMALL);
+
+ //
+ // Allocate MemoryAttributesTable
+ //
+ RuntimeEntryCount = MemoryMapSize/DescriptorSize;
+ MemoryAttributesTableSize = sizeof(EDKII_PI_SMM_MEMORY_ATTRIBUTES_TABLE) + DescriptorSize * RuntimeEntryCount;
+ MemoryAttributesTable = AllocatePool (sizeof(EDKII_PI_SMM_MEMORY_ATTRIBUTES_TABLE) + DescriptorSize * RuntimeEntryCount);
+ ASSERT (MemoryAttributesTable != NULL);
+ MemoryAttributesTable->Version = EDKII_PI_SMM_MEMORY_ATTRIBUTES_TABLE_VERSION;
+ MemoryAttributesTable->NumberOfEntries = (UINT32)RuntimeEntryCount;
+ MemoryAttributesTable->DescriptorSize = (UINT32)DescriptorSize;
+ MemoryAttributesTable->Reserved = 0;
+ DEBUG ((DEBUG_INFO, "MemoryAttributesTable:\n"));
+ DEBUG ((DEBUG_INFO, " Version - 0x%08x\n", MemoryAttributesTable->Version));
+ DEBUG ((DEBUG_INFO, " NumberOfEntries - 0x%08x\n", MemoryAttributesTable->NumberOfEntries));
+ DEBUG ((DEBUG_INFO, " DescriptorSize - 0x%08x\n", MemoryAttributesTable->DescriptorSize));
+ MemoryAttributesEntry = (EFI_MEMORY_DESCRIPTOR *)(MemoryAttributesTable + 1);
+ for (Index = 0; Index < MemoryMapSize/DescriptorSize; Index++) {
+ CopyMem (MemoryAttributesEntry, MemoryMap, DescriptorSize);
+ DEBUG ((DEBUG_INFO, "Entry (0x%x)\n", MemoryAttributesEntry));
+ DEBUG ((DEBUG_INFO, " Type - 0x%x\n", MemoryAttributesEntry->Type));
+ DEBUG ((DEBUG_INFO, " PhysicalStart - 0x%016lx\n", MemoryAttributesEntry->PhysicalStart));
+ DEBUG ((DEBUG_INFO, " VirtualStart - 0x%016lx\n", MemoryAttributesEntry->VirtualStart));
+ DEBUG ((DEBUG_INFO, " NumberOfPages - 0x%016lx\n", MemoryAttributesEntry->NumberOfPages));
+ DEBUG ((DEBUG_INFO, " Attribute - 0x%016lx\n", MemoryAttributesEntry->Attribute));
+ MemoryAttributesEntry = NEXT_MEMORY_DESCRIPTOR(MemoryAttributesEntry, DescriptorSize);
+
+ MemoryMap = NEXT_MEMORY_DESCRIPTOR(MemoryMap, DescriptorSize);
+ }
+
+ Status = gSmst->SmmInstallConfigurationTable (gSmst, &gEdkiiPiSmmMemoryAttributesTableGuid, MemoryAttributesTable, MemoryAttributesTableSize);
+ ASSERT_EFI_ERROR (Status);
+}
+
+/**
+ This function returns whether the image is inside SMRAM.
+
+ @param[in] LoadedImage LoadedImage protocol instance for an image.
+
+ @retval TRUE the image is inside SMRAM.
+ @retval FALSE the image is outside SMRAM.
+**/
+BOOLEAN
+IsImageInsideSmram (
+ IN EFI_LOADED_IMAGE_PROTOCOL *LoadedImage
+ )
+{
+ UINTN Index;
+
+ for (Index = 0; Index < mFullSmramRangeCount; Index++) {
+ if ((mFullSmramRanges[Index].PhysicalStart <= (UINTN)LoadedImage->ImageBase)&&
+ (mFullSmramRanges[Index].PhysicalStart + mFullSmramRanges[Index].PhysicalSize >= (UINTN)LoadedImage->ImageBase + LoadedImage->ImageSize)) {
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+/**
+ This function installs all SMM image record information.
+**/
+VOID
+SmmInstallImageRecord (
+ VOID
+ )
+{
+ EFI_STATUS Status;
+ UINTN NoHandles;
+ EFI_HANDLE *HandleBuffer;
+ EFI_LOADED_IMAGE_PROTOCOL *LoadedImage;
+ UINTN Index;
+ EFI_SMM_DRIVER_ENTRY DriverEntry;
+
+ Status = SmmLocateHandleBuffer (
+ ByProtocol,
+ &gEfiLoadedImageProtocolGuid,
+ NULL,
+ &NoHandles,
+ &HandleBuffer
+ );
+ if (EFI_ERROR (Status)) {
+ return ;
+ }
+
+ for (Index = 0; Index < NoHandles; Index++) {
+ Status = gSmst->SmmHandleProtocol (
+ HandleBuffer[Index],
+ &gEfiLoadedImageProtocolGuid,
+ (VOID **)&LoadedImage
+ );
+ if (EFI_ERROR (Status)) {
+ continue;
+ }
+ DEBUG ((DEBUG_VERBOSE, "LoadedImage - 0x%x 0x%x ", LoadedImage->ImageBase, LoadedImage->ImageSize));
+ {
+ VOID *PdbPointer;
+ PdbPointer = PeCoffLoaderGetPdbPointer (LoadedImage->ImageBase);
+ if (PdbPointer != NULL) {
+ DEBUG ((DEBUG_VERBOSE, "(%a) ", PdbPointer));
+ }
+ }
+ DEBUG ((DEBUG_VERBOSE, "\n"));
+ ZeroMem (&DriverEntry, sizeof(DriverEntry));
+ DriverEntry.ImageBuffer = (UINTN)LoadedImage->ImageBase;
+ DriverEntry.NumberOfPage = EFI_SIZE_TO_PAGES((UINTN)LoadedImage->ImageSize);
+ SmmInsertImageRecord (&DriverEntry);
+ }
+
+ FreePool (HandleBuffer);
+}
+
+/**
+ Install MemoryAttributesTable.
+
+ @param[in] Protocol Points to the protocol's unique identifier.
+ @param[in] Interface Points to the interface instance.
+ @param[in] Handle The handle on which the interface was installed.
+
+ @retval EFI_SUCCESS Notification runs successfully.
+**/
+EFI_STATUS
+EFIAPI
+SmmInstallMemoryAttributesTable (
+ IN CONST EFI_GUID *Protocol,
+ IN VOID *Interface,
+ IN EFI_HANDLE Handle
+ )
+{
+ SmmInstallImageRecord ();
+
+ DEBUG ((DEBUG_INFO, "SMM MemoryProtectionAttribute - 0x%016lx\n", mMemoryProtectionAttribute));
+ if ((mMemoryProtectionAttribute & EFI_MEMORY_ATTRIBUTES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA) == 0) {
+ return EFI_SUCCESS;
+ }
+
+ DEBUG ((DEBUG_VERBOSE, "SMM Total Image Count - 0x%x\n", mImagePropertiesPrivateData.ImageRecordCount));
+ DEBUG ((DEBUG_VERBOSE, "SMM Dump ImageRecord:\n"));
+ DumpImageRecord ();
+
+ PublishMemoryAttributesTable ();
+
+ return EFI_SUCCESS;
+}
+
+/**
+ Initialize MemoryAttributesTable support.
+**/
+VOID
+EFIAPI
+SmmCoreInitializeMemoryAttributesTable (
+ VOID
+ )
+{
+ EFI_STATUS Status;
+ VOID *Registration;
+
+ Status = gSmst->SmmRegisterProtocolNotify (
+ &gEfiSmmEndOfDxeProtocolGuid,
+ SmmInstallMemoryAttributesTable,
+ &Registration
+ );
+ ASSERT_EFI_ERROR (Status);
+
+ return ;
+}
diff --git a/MdeModulePkg/Core/PiSmmCore/Page.c b/MdeModulePkg/Core/PiSmmCore/Page.c
index 5c04e8c..5f19d7e 100644
--- a/MdeModulePkg/Core/PiSmmCore/Page.c
+++ b/MdeModulePkg/Core/PiSmmCore/Page.c
@@ -2,22 +2,572 @@
SMM Memory page management functions.
Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
- This program and the accompanying materials are licensed and made available
- under the terms and conditions of the BSD License which accompanies this
- distribution. The full text of the license may be found at
- http://opensource.org/licenses/bsd-license.php
+ This program and the accompanying materials are licensed and made available
+ under the terms and conditions of the BSD License which accompanies this
+ distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
- THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
- WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "PiSmmCore.h"
+#include <Library/SmmServicesTableLib.h>
#define TRUNCATE_TO_PAGES(a) ((a) >> EFI_PAGE_SHIFT)
LIST_ENTRY mSmmMemoryMap = INITIALIZE_LIST_HEAD_VARIABLE (mSmmMemoryMap);
+//
+// For GetMemoryMap()
+//
+
+#define MEMORY_MAP_SIGNATURE SIGNATURE_32('m','m','a','p')
+typedef struct {
+ UINTN Signature;
+ LIST_ENTRY Link;
+
+ BOOLEAN FromStack;
+ EFI_MEMORY_TYPE Type;
+ UINT64 Start;
+ UINT64 End;
+
+} MEMORY_MAP;
+
+LIST_ENTRY gMemoryMap = INITIALIZE_LIST_HEAD_VARIABLE (gMemoryMap);
+
+
+#define MAX_MAP_DEPTH 6
+
+///
+/// mMapDepth - depth of new descriptor stack
+///
+UINTN mMapDepth = 0;
+///
+/// mMapStack - space to use as temp storage to build new map descriptors
+///
+MEMORY_MAP mMapStack[MAX_MAP_DEPTH];
+UINTN mFreeMapStack = 0;
+///
+/// This list maintains the free memory map entry list
+///
+LIST_ENTRY mFreeMemoryMapEntryList = INITIALIZE_LIST_HEAD_VARIABLE (mFreeMemoryMapEntryList);
+
+/**
+ Allocates pages from the memory map.
+
+ @param[in] Type The type of allocation to perform.
+ @param[in] MemoryType The type of memory to turn the allocated pages
+ into.
+ @param[in] NumberOfPages The number of pages to allocate.
+ @param[out] Memory A pointer to receive the base allocated memory
+ address.
+ @param[in] AddRegion If this memory is a newly added region.
+
+ @retval EFI_INVALID_PARAMETER Parameters violate checking rules defined in spec.
+ @retval EFI_NOT_FOUND Could not allocate pages that match the requirement.
+ @retval EFI_OUT_OF_RESOURCES Not enough pages to allocate.
+ @retval EFI_SUCCESS Pages successfully allocated.
+
+**/
+EFI_STATUS
+SmmInternalAllocatePagesEx (
+ IN EFI_ALLOCATE_TYPE Type,
+ IN EFI_MEMORY_TYPE MemoryType,
+ IN UINTN NumberOfPages,
+ OUT EFI_PHYSICAL_ADDRESS *Memory,
+ IN BOOLEAN AddRegion
+ );
+
+/**
+ Internal function. Dequeue a descriptor entry from the mFreeMemoryMapEntryList.
+ If the list is empty, then allocate a new page to refuel the list.
+ Please note that this algorithm to allocate the memory map descriptor has a property
+ that the memory allocated for memory entries always grows, and will never really be freed.
+
+ @return The memory map descriptor dequeued from the mFreeMemoryMapEntryList
+
+**/
+MEMORY_MAP *
+AllocateMemoryMapEntry (
+ VOID
+ )
+{
+ EFI_PHYSICAL_ADDRESS Mem;
+ EFI_STATUS Status;
+ MEMORY_MAP* FreeDescriptorEntries;
+ MEMORY_MAP* Entry;
+ UINTN Index;
+
+ //DEBUG((DEBUG_INFO, "AllocateMemoryMapEntry\n"));
+
+ if (IsListEmpty (&mFreeMemoryMapEntryList)) {
+ //DEBUG((DEBUG_INFO, "mFreeMemoryMapEntryList is empty\n"));
+ //
+ // The list is empty, so allocate one page to refuel the list
+ //
+ Status = SmmInternalAllocatePagesEx (
+ AllocateAnyPages,
+ EfiRuntimeServicesData,
+ EFI_SIZE_TO_PAGES(DEFAULT_PAGE_ALLOCATION),
+ &Mem,
+ TRUE
+ );
+ ASSERT_EFI_ERROR (Status);
+ if(!EFI_ERROR (Status)) {
+ FreeDescriptorEntries = (MEMORY_MAP *)(UINTN)Mem;
+ //DEBUG((DEBUG_INFO, "New FreeDescriptorEntries - 0x%x\n", FreeDescriptorEntries));
+ //
+ // Enqueue the free memory map entries into the list
+ //
+ for (Index = 0; Index< DEFAULT_PAGE_ALLOCATION / sizeof(MEMORY_MAP); Index++) {
+ FreeDescriptorEntries[Index].Signature = MEMORY_MAP_SIGNATURE;
+ InsertTailList (&mFreeMemoryMapEntryList, &FreeDescriptorEntries[Index].Link);
+ }
+ } else {
+ return NULL;
+ }
+ }
+ //
+ // dequeue the first descriptor from the list
+ //
+ Entry = CR (mFreeMemoryMapEntryList.ForwardLink, MEMORY_MAP, Link, MEMORY_MAP_SIGNATURE);
+ RemoveEntryList (&Entry->Link);
+
+ return Entry;
+}
+
+
+/**
+ Internal function. Moves any memory descriptors that are on the
+ temporary descriptor stack to heap.
+
+**/
+VOID
+CoreFreeMemoryMapStack (
+ VOID
+ )
+{
+ MEMORY_MAP *Entry;
+
+ //
+ // If already freeing the map stack, then return
+ //
+ if (mFreeMapStack != 0) {
+ ASSERT (FALSE);
+ return ;
+ }
+
+ //
+ // Move the temporary memory descriptor stack into pool
+ //
+ mFreeMapStack += 1;
+
+ while (mMapDepth != 0) {
+ //
+ // Dequeue a memory map entry from mFreeMemoryMapEntryList
+ //
+ Entry = AllocateMemoryMapEntry ();
+ ASSERT (Entry);
+
+ //
+ // Update to proper entry
+ //
+ mMapDepth -= 1;
+
+ if (mMapStack[mMapDepth].Link.ForwardLink != NULL) {
+
+ CopyMem (Entry , &mMapStack[mMapDepth], sizeof (MEMORY_MAP));
+ Entry->FromStack = FALSE;
+
+ //
+ // Move this entry to general memory
+ //
+ InsertTailList (&mMapStack[mMapDepth].Link, &Entry->Link);
+ RemoveEntryList (&mMapStack[mMapDepth].Link);
+ mMapStack[mMapDepth].Link.ForwardLink = NULL;
+ }
+ }
+
+ mFreeMapStack -= 1;
+}
+
+/**
+ Insert a new entry into the memory map.
+
+ @param[in] Link The old memory map entry to be linked.
+ @param[in] Start The start address of new memory map entry.
+ @param[in] End The end address of new memory map entry.
+ @param[in] Type The type of new memory map entry.
+ @param[in] Next If the new entry is inserted after the old entry.
+ @param[in] AddRegion If this memory is a newly added region.
+**/
+VOID
+InsertNewEntry (
+ IN LIST_ENTRY *Link,
+ IN UINT64 Start,
+ IN UINT64 End,
+ IN EFI_MEMORY_TYPE Type,
+ IN BOOLEAN Next,
+ IN BOOLEAN AddRegion
+ )
+{
+ MEMORY_MAP *Entry;
+
+ Entry = &mMapStack[mMapDepth];
+ mMapDepth += 1;
+ ASSERT (mMapDepth < MAX_MAP_DEPTH);
+ Entry->FromStack = TRUE;
+
+ Entry->Signature = MEMORY_MAP_SIGNATURE;
+ Entry->Type = Type;
+ Entry->Start = Start;
+ Entry->End = End;
+ if (Next) {
+ InsertHeadList (Link, &Entry->Link);
+ } else {
+ InsertTailList (Link, &Entry->Link);
+ }
+}
+
+/**
+ Remove old entry from memory map.
+
+ @param[in] Entry Memory map entry to be removed.
+**/
+VOID
+RemoveOldEntry (
+ IN MEMORY_MAP *Entry
+ )
+{
+ RemoveEntryList (&Entry->Link);
+ if (!Entry->FromStack) {
+ InsertTailList (&mFreeMemoryMapEntryList, &Entry->Link);
+ }
+}
+
+/**
+ Update SMM memory map entry.
+
+ @param[in] Type The memory type of the converted region.
+ @param[in] Memory The base address of the region.
+ @param[in] NumberOfPages The number of pages of the region.
+ @param[in] AddRegion If this memory is a newly added region.
+**/
+VOID
+ConvertSmmMemoryMapEntry (
+ IN EFI_MEMORY_TYPE Type,
+ IN EFI_PHYSICAL_ADDRESS Memory,
+ IN UINTN NumberOfPages,
+ IN BOOLEAN AddRegion
+ )
+{
+ LIST_ENTRY *Link;
+ MEMORY_MAP *Entry;
+ MEMORY_MAP *NextEntry;
+ LIST_ENTRY *NextLink;
+ MEMORY_MAP *PreviousEntry;
+ LIST_ENTRY *PreviousLink;
+ EFI_PHYSICAL_ADDRESS Start;
+ EFI_PHYSICAL_ADDRESS End;
+
+ Start = Memory;
+ End = Memory + EFI_PAGES_TO_SIZE(NumberOfPages) - 1;
+
+ //
+ // Exclude memory region
+ //
+ Link = gMemoryMap.ForwardLink;
+ while (Link != &gMemoryMap) {
+ Entry = CR (Link, MEMORY_MAP, Link, MEMORY_MAP_SIGNATURE);
+ Link = Link->ForwardLink;
+
+ //
+ // ---------------------------------------------------
+ // | +----------+ +------+ +------+ +------+ |
+ // ---|gMemoryMap|---|Entry1|---|Entry2|---|Entry3|---
+ // +----------+ ^ +------+ +------+ +------+
+ // |
+ // +------+
+ // |EntryX|
+ // +------+
+ //
+ if (Entry->Start > End) {
+ if ((Entry->Start == End + 1) && (Entry->Type == Type)) {
+ Entry->Start = Start;
+ return ;
+ }
+ InsertNewEntry (
+ &Entry->Link,
+ Start,
+ End,
+ Type,
+ FALSE,
+ AddRegion
+ );
+ return ;
+ }
+
+ if ((Entry->Start <= Start) && (Entry->End >= End)) {
+ if (Entry->Type != Type) {
+ if (Entry->Start < Start) {
+ //
+ // ---------------------------------------------------
+ // | +----------+ +------+ +------+ +------+ |
+ // ---|gMemoryMap|---|Entry1|---|EntryX|---|Entry3|---
+ // +----------+ +------+ ^ +------+ +------+
+ // |
+ // +------+
+ // |EntryA|
+ // +------+
+ //
+ InsertNewEntry (
+ &Entry->Link,
+ Entry->Start,
+ Start - 1,
+ Entry->Type,
+ FALSE,
+ AddRegion
+ );
+ }
+ if (Entry->End > End) {
+ //
+ // ---------------------------------------------------
+ // | +----------+ +------+ +------+ +------+ |
+ // ---|gMemoryMap|---|Entry1|---|EntryX|---|Entry3|---
+ // +----------+ +------+ +------+ ^ +------+
+ // |
+ // +------+
+ // |EntryZ|
+ // +------+
+ //
+ InsertNewEntry (
+ &Entry->Link,
+ End + 1,
+ Entry->End,
+ Entry->Type,
+ TRUE,
+ AddRegion
+ );
+ }
+ //
+ // Update this node
+ //
+ Entry->Start = Start;
+ Entry->End = End;
+ Entry->Type = Type;
+
+ //
+ // Check adjacent
+ //
+ NextLink = Entry->Link.ForwardLink;
+ if (NextLink != &gMemoryMap) {
+ NextEntry = CR (NextLink, MEMORY_MAP, Link, MEMORY_MAP_SIGNATURE);
+ //
+ // ---------------------------------------------------
+ // | +----------+ +------+ +-----------------+ |
+ // ---|gMemoryMap|---|Entry1|---|EntryX Entry3|---
+ // +----------+ +------+ +-----------------+
+ //
+ if ((Entry->Type == NextEntry->Type) && (Entry->End + 1 == NextEntry->Start)) {
+ Entry->End = NextEntry->End;
+ RemoveOldEntry (NextEntry);
+ }
+ }
+ PreviousLink = Entry->Link.BackLink;
+ if (PreviousLink != &gMemoryMap) {
+ PreviousEntry = CR (PreviousLink, MEMORY_MAP, Link, MEMORY_MAP_SIGNATURE);
+ //
+ // ---------------------------------------------------
+ // | +----------+ +-----------------+ +------+ |
+ // ---|gMemoryMap|---|Entry1 EntryX|---|Entry3|---
+ // +----------+ +-----------------+ +------+
+ //
+ if ((PreviousEntry->Type == Entry->Type) && (PreviousEntry->End + 1 == Entry->Start)) {
+ PreviousEntry->End = Entry->End;
+ RemoveOldEntry (Entry);
+ }
+ }
+ }
+ return ;
+ }
+ }
+
+ //
+ // ---------------------------------------------------
+ // | +----------+ +------+ +------+ +------+ |
+ // ---|gMemoryMap|---|Entry1|---|Entry2|---|Entry3|---
+ // +----------+ +------+ +------+ +------+ ^
+ // |
+ // +------+
+ // |EntryX|
+ // +------+
+ //
+ Link = gMemoryMap.BackLink;
+ if (Link != &gMemoryMap) {
+ Entry = CR (Link, MEMORY_MAP, Link, MEMORY_MAP_SIGNATURE);
+ if ((Entry->End + 1 == Start) && (Entry->Type == Type)) {
+ Entry->End = End;
+ return ;
+ }
+ }
+ InsertNewEntry (
+ &gMemoryMap,
+ Start,
+ End,
+ Type,
+ FALSE,
+ AddRegion
+ );
+ return ;
+}
+
+/**
+ Return the count of SMM memory map entries.
+
+ @return The count of SMM memory map entries.
+**/
+UINTN
+GetSmmMemoryMapEntryCount (
+ VOID
+ )
+{
+ LIST_ENTRY *Link;
+ UINTN Count;
+
+ Count = 0;
+ Link = gMemoryMap.ForwardLink;
+ while (Link != &gMemoryMap) {
+ Link = Link->ForwardLink;
+ Count++;
+ }
+ return Count;
+}
+
+/**
+ Dump the SMM memory map entries.
+**/
+VOID
+DumpSmmMemoryMapEntry (
+ VOID
+ )
+{
+ LIST_ENTRY *Link;
+ MEMORY_MAP *Entry;
+ EFI_PHYSICAL_ADDRESS Last;
+
+ Last = 0;
+ DEBUG ((DEBUG_INFO, "DumpSmmMemoryMapEntry:\n"));
+ Link = gMemoryMap.ForwardLink;
+ while (Link != &gMemoryMap) {
+ Entry = CR (Link, MEMORY_MAP, Link, MEMORY_MAP_SIGNATURE);
+ Link = Link->ForwardLink;
+
+ if ((Last != 0) && (Last != (UINT64)-1)) {
+ if (Last + 1 != Entry->Start) {
+ Last = (UINT64)-1;
+ } else {
+ Last = Entry->End;
+ }
+ } else if (Last == 0) {
+ Last = Entry->End;
+ }
+
+ DEBUG ((DEBUG_INFO, "Entry (Link - 0x%x)\n", &Entry->Link));
+ DEBUG ((DEBUG_INFO, " Signature - 0x%x\n", Entry->Signature));
+ DEBUG ((DEBUG_INFO, " Link.ForwardLink - 0x%x\n", Entry->Link.ForwardLink));
+ DEBUG ((DEBUG_INFO, " Link.BackLink - 0x%x\n", Entry->Link.BackLink));
+ DEBUG ((DEBUG_INFO, " Type - 0x%x\n", Entry->Type));
+ DEBUG ((DEBUG_INFO, " Start - 0x%016lx\n", Entry->Start));
+ DEBUG ((DEBUG_INFO, " End - 0x%016lx\n", Entry->End));
+ }
+
+ ASSERT (Last != (UINT64)-1);
+}
+
+/**
+ Dump Smm memory map.
+**/
+VOID
+DumpSmmMemoryMap (
+ VOID
+ )
+{
+ LIST_ENTRY *Node;
+ FREE_PAGE_LIST *Pages;
+
+ DEBUG ((DEBUG_INFO, "DumpSmmMemoryMap\n"));
+
+ Pages = NULL;
+ Node = mSmmMemoryMap.ForwardLink;
+ while (Node != &mSmmMemoryMap) {
+ Pages = BASE_CR (Node, FREE_PAGE_LIST, Link);
+ DEBUG ((DEBUG_INFO, "Pages - 0x%x\n", Pages));
+ DEBUG ((DEBUG_INFO, "Pages->NumberOfPages - 0x%x\n", Pages->NumberOfPages));
+ Node = Node->ForwardLink;
+ }
+}
+
+/**
+ Check if an SMM base~length range is in the SMM memory map.
+
+ @param[in] Base The base address of the SMM memory to be checked.
+ @param[in] Length The length of the SMM memory to be checked.
+
+ @retval TRUE The SMM base~length range is in the SMM memory map.
+ @retval FALSE The SMM base~length range is not in the SMM memory map.
+**/
+BOOLEAN
+SmmMemoryMapConsistencyCheckRange (
+ IN EFI_PHYSICAL_ADDRESS Base,
+ IN UINTN Length
+ )
+{
+ LIST_ENTRY *Link;
+ MEMORY_MAP *Entry;
+ BOOLEAN Result;
+
+ Result = FALSE;
+ Link = gMemoryMap.ForwardLink;
+ while (Link != &gMemoryMap) {
+ Entry = CR (Link, MEMORY_MAP, Link, MEMORY_MAP_SIGNATURE);
+ Link = Link->ForwardLink;
+
+ if (Entry->Type != EfiConventionalMemory) {
+ continue;
+ }
+ if (Entry->Start == Base && Entry->End == Base + Length - 1) {
+ Result = TRUE;
+ break;
+ }
+ }
+
+ return Result;
+}
+
+/**
+ Check the consistency of Smm memory map.
+**/
+VOID
+SmmMemoryMapConsistencyCheck (
+ VOID
+ )
+{
+ LIST_ENTRY *Node;
+ FREE_PAGE_LIST *Pages;
+ BOOLEAN Result;
+
+ Pages = NULL;
+ Node = mSmmMemoryMap.ForwardLink;
+ while (Node != &mSmmMemoryMap) {
+ Pages = BASE_CR (Node, FREE_PAGE_LIST, Link);
+ Result = SmmMemoryMapConsistencyCheckRange ((EFI_PHYSICAL_ADDRESS)(UINTN)Pages, (UINTN)EFI_PAGES_TO_SIZE(Pages->NumberOfPages));
+ ASSERT (Result);
+ Node = Node->ForwardLink;
+ }
+}
+
/**
Internal Function. Allocate n pages from given free page node.
@@ -131,12 +681,13 @@ InternalAllocAddress (
/**
Allocates pages from the memory map.
- @param Type The type of allocation to perform.
- @param MemoryType The type of memory to turn the allocated pages
- into.
- @param NumberOfPages The number of pages to allocate.
- @param Memory A pointer to receive the base allocated memory
- address.
+ @param[in] Type The type of allocation to perform.
+ @param[in] MemoryType The type of memory to turn the allocated pages
+ into.
+ @param[in] NumberOfPages The number of pages to allocate.
+ @param[out] Memory A pointer to receive the base allocated memory
+ address.
+ @param[in] AddRegion If this memory is a newly added region.
@retval EFI_INVALID_PARAMETER Parameters violate checking rules defined in spec.
@retval EFI_NOT_FOUND Could not allocate pages match the requirement.
@@ -145,12 +696,12 @@ InternalAllocAddress (
**/
EFI_STATUS
-EFIAPI
-SmmInternalAllocatePages (
+SmmInternalAllocatePagesEx (
IN EFI_ALLOCATE_TYPE Type,
IN EFI_MEMORY_TYPE MemoryType,
IN UINTN NumberOfPages,
- OUT EFI_PHYSICAL_ADDRESS *Memory
+ OUT EFI_PHYSICAL_ADDRESS *Memory,
+ IN BOOLEAN AddRegion
)
{
UINTN RequestedAddress;
@@ -179,7 +730,7 @@ SmmInternalAllocatePages (
);
if (*Memory == (UINTN)-1) {
return EFI_OUT_OF_RESOURCES;
- }
+ }
break;
case AllocateAddress:
*Memory = InternalAllocAddress (
@@ -194,12 +745,49 @@ SmmInternalAllocatePages (
default:
return EFI_INVALID_PARAMETER;
}
+
+ //
+ // Update SmmMemoryMap here.
+ //
+ ConvertSmmMemoryMapEntry (MemoryType, *Memory, NumberOfPages, AddRegion);
+ if (!AddRegion) {
+ CoreFreeMemoryMapStack();
+ }
+
return EFI_SUCCESS;
}
/**
Allocates pages from the memory map.
+ @param[in] Type The type of allocation to perform.
+ @param[in] MemoryType The type of memory to turn the allocated pages
+ into.
+ @param[in] NumberOfPages The number of pages to allocate.
+ @param[out] Memory A pointer to receive the base allocated memory
+ address.
+
+ @retval EFI_INVALID_PARAMETER Parameters violate checking rules defined in spec.
+ @retval EFI_NOT_FOUND Could not allocate pages matching the requirement.
+ @retval EFI_OUT_OF_RESOURCES Not enough pages to allocate.
+ @retval EFI_SUCCESS Pages successfully allocated.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmInternalAllocatePages (
+ IN EFI_ALLOCATE_TYPE Type,
+ IN EFI_MEMORY_TYPE MemoryType,
+ IN UINTN NumberOfPages,
+ OUT EFI_PHYSICAL_ADDRESS *Memory
+ )
+{
+ return SmmInternalAllocatePagesEx (Type, MemoryType, NumberOfPages, Memory, FALSE);
+}
+
+/**
+ Allocates pages from the memory map.
+
@param Type The type of allocation to perform.
@param MemoryType The type of memory to turn the allocated pages
into.
@@ -268,8 +856,9 @@ InternalMergeNodes (
/**
Frees previous allocated pages.
- @param Memory Base address of memory being freed.
- @param NumberOfPages The number of pages to free.
+ @param[in] Memory Base address of memory being freed.
+ @param[in] NumberOfPages The number of pages to free.
+ @param[in] AddRegion If this memory is a newly added region.
@retval EFI_NOT_FOUND Could not find the entry that covers the range.
@retval EFI_INVALID_PARAMETER Address not aligned.
@@ -277,10 +866,10 @@ InternalMergeNodes (
**/
EFI_STATUS
-EFIAPI
-SmmInternalFreePages (
+SmmInternalFreePagesEx (
IN EFI_PHYSICAL_ADDRESS Memory,
- IN UINTN NumberOfPages
+ IN UINTN NumberOfPages,
+ IN BOOLEAN AddRegion
)
{
LIST_ENTRY *Node;
@@ -326,12 +915,41 @@ SmmInternalFreePages (
InternalMergeNodes (Pages);
}
+ //
+ // Update SmmMemoryMap here.
+ //
+ ConvertSmmMemoryMapEntry (EfiConventionalMemory, Memory, NumberOfPages, AddRegion);
+ if (!AddRegion) {
+ CoreFreeMemoryMapStack();
+ }
+
return EFI_SUCCESS;
}
/**
Frees previous allocated pages.
+ @param[in] Memory Base address of memory being freed.
+ @param[in] NumberOfPages The number of pages to free.
+
+ @retval EFI_NOT_FOUND Could not find the entry that covers the range.
+ @retval EFI_INVALID_PARAMETER Address not aligned.
+ @return EFI_SUCCESS Pages successfully freed.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmInternalFreePages (
+ IN EFI_PHYSICAL_ADDRESS Memory,
+ IN UINTN NumberOfPages
+ )
+{
+ return SmmInternalFreePagesEx (Memory, NumberOfPages, FALSE);
+}
+
+/**
+ Frees previous allocated pages.
+
@param Memory Base address of memory being freed.
@param NumberOfPages The number of pages to free.
@@ -383,16 +1001,121 @@ SmmAddMemoryRegion (
UINTN AlignedMemBase;
//
- // Do not add memory regions that is already allocated, needs testing, or needs ECC initialization
+ // Add EfiRuntimeServicesData for memory regions that are already allocated, need testing, or need ECC initialization
//
if ((Attributes & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
- return;
+ Type = EfiRuntimeServicesData;
+ } else {
+ Type = EfiConventionalMemory;
}
-
+
+ DEBUG ((DEBUG_INFO, "SmmAddMemoryRegion\n"));
+ DEBUG ((DEBUG_INFO, " MemBase - 0x%lx\n", MemBase));
+ DEBUG ((DEBUG_INFO, " MemLength - 0x%lx\n", MemLength));
+ DEBUG ((DEBUG_INFO, " Type - 0x%x\n", Type));
+ DEBUG ((DEBUG_INFO, " Attributes - 0x%lx\n", Attributes));
+
//
// Align range on an EFI_PAGE_SIZE boundary
- //
+ //
AlignedMemBase = (UINTN)(MemBase + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
MemLength -= AlignedMemBase - MemBase;
- SmmFreePages (AlignedMemBase, TRUNCATE_TO_PAGES ((UINTN)MemLength));
+ if (Type == EfiConventionalMemory) {
+ SmmInternalFreePagesEx (AlignedMemBase, TRUNCATE_TO_PAGES ((UINTN)MemLength), TRUE);
+ } else {
+ ConvertSmmMemoryMapEntry (EfiRuntimeServicesData, AlignedMemBase, TRUNCATE_TO_PAGES ((UINTN)MemLength), TRUE);
+ }
+
+ CoreFreeMemoryMapStack ();
+}
+
+/**
+ This function returns a copy of the current memory map. The map is an array of
+ memory descriptors, each of which describes a contiguous block of memory.
+
+ @param[in, out] MemoryMapSize A pointer to the size, in bytes, of the
+ MemoryMap buffer. On input, this is the size of
+ the buffer allocated by the caller. On output,
+ it is the size of the buffer returned by the
+ firmware if the buffer was large enough, or the
+ size of the buffer needed to contain the map if
+ the buffer was too small.
+ @param[in, out] MemoryMap A pointer to the buffer in which firmware places
+ the current memory map.
+ @param[out] MapKey A pointer to the location in which firmware
+ returns the key for the current memory map.
+ @param[out] DescriptorSize A pointer to the location in which firmware
+ returns the size, in bytes, of an individual
+ EFI_MEMORY_DESCRIPTOR.
+ @param[out] DescriptorVersion A pointer to the location in which firmware
+ returns the version number associated with the
+ EFI_MEMORY_DESCRIPTOR.
+
+ @retval EFI_SUCCESS The memory map was returned in the MemoryMap
+ buffer.
+ @retval EFI_BUFFER_TOO_SMALL The MemoryMap buffer was too small. The current
+ buffer size needed to hold the memory map is
+ returned in MemoryMapSize.
+ @retval EFI_INVALID_PARAMETER One of the parameters has an invalid value.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmCoreGetMemoryMap (
+ IN OUT UINTN *MemoryMapSize,
+ IN OUT EFI_MEMORY_DESCRIPTOR *MemoryMap,
+ OUT UINTN *MapKey,
+ OUT UINTN *DescriptorSize,
+ OUT UINT32 *DescriptorVersion
+ )
+{
+ UINTN Count;
+ LIST_ENTRY *Link;
+ MEMORY_MAP *Entry;
+ UINTN Size;
+ UINTN BufferSize;
+
+ Size = sizeof (EFI_MEMORY_DESCRIPTOR);
+
+ //
+ // Make sure Size != sizeof(EFI_MEMORY_DESCRIPTOR). This will
+ // prevent people from having pointer math bugs in their code.
+ // Now you have to use *DescriptorSize to make things work.
+ //
+ Size += sizeof(UINT64) - (Size % sizeof (UINT64));
+
+ if (DescriptorSize != NULL) {
+ *DescriptorSize = Size;
+ }
+
+ if (DescriptorVersion != NULL) {
+ *DescriptorVersion = EFI_MEMORY_DESCRIPTOR_VERSION;
+ }
+
+ Count = GetSmmMemoryMapEntryCount ();
+ BufferSize = Size * Count;
+ if (*MemoryMapSize < BufferSize) {
+ *MemoryMapSize = BufferSize;
+ return EFI_BUFFER_TOO_SMALL;
+ }
+
+ *MemoryMapSize = BufferSize;
+ if (MemoryMap == NULL) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ ZeroMem (MemoryMap, BufferSize);
+ Link = gMemoryMap.ForwardLink;
+ while (Link != &gMemoryMap) {
+ Entry = CR (Link, MEMORY_MAP, Link, MEMORY_MAP_SIGNATURE);
+ Link = Link->ForwardLink;
+
+ MemoryMap->Type = Entry->Type;
+ MemoryMap->PhysicalStart = Entry->Start;
+ MemoryMap->NumberOfPages = RShiftU64 (Entry->End - Entry->Start + 1, EFI_PAGE_SHIFT);
+
+ MemoryMap = NEXT_MEMORY_DESCRIPTOR (MemoryMap, Size);
+ }
+
+ return EFI_SUCCESS;
}
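For reference, a minimal usage sketch (illustrative only, not part of this patch) of the new SmmCoreGetMemoryMap() API above, following the usual size-probe pattern; it assumes the PiSmmCore internal SmmAllocatePool() and abbreviates error handling:
  UINTN                  MemoryMapSize;
  EFI_MEMORY_DESCRIPTOR  *MemoryMap;
  UINTN                  MapKey;
  UINTN                  DescriptorSize;
  UINT32                 DescriptorVersion;
  EFI_STATUS             Status;

  //
  // First call with a zero-sized buffer to learn the required size.
  //
  MemoryMapSize = 0;
  MemoryMap     = NULL;
  Status = SmmCoreGetMemoryMap (&MemoryMapSize, NULL, &MapKey, &DescriptorSize, &DescriptorVersion);
  if (Status == EFI_BUFFER_TOO_SMALL) {
    //
    // Allocate the reported size from SMM pool and retrieve the map.
    //
    Status = SmmAllocatePool (EfiRuntimeServicesData, MemoryMapSize, (VOID **)&MemoryMap);
    ASSERT_EFI_ERROR (Status);
    Status = SmmCoreGetMemoryMap (&MemoryMapSize, MemoryMap, &MapKey, &DescriptorSize, &DescriptorVersion);
    ASSERT_EFI_ERROR (Status);
  }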
diff --git a/MdeModulePkg/Core/PiSmmCore/PiSmmCore.c b/MdeModulePkg/Core/PiSmmCore/PiSmmCore.c
index 2bdb19c..b877a33 100644
--- a/MdeModulePkg/Core/PiSmmCore/PiSmmCore.c
+++ b/MdeModulePkg/Core/PiSmmCore/PiSmmCore.c
@@ -87,6 +87,8 @@ SMM_CORE_SMI_HANDLERS mSmmCoreSmiHandlers[] = {
UINTN mFullSmramRangeCount;
EFI_SMRAM_DESCRIPTOR *mFullSmramRanges;
+EFI_SMM_DRIVER_ENTRY *mSmmCoreDriverEntry;
+
EFI_LOADED_IMAGE_PROTOCOL *mSmmCoreLoadedImage;
/**
@@ -564,6 +566,42 @@ SmmCoreInstallLoadedImage (
);
ASSERT_EFI_ERROR (Status);
+ //
+ // Allocate a Loaded Image Protocol in SMM
+ //
+ Status = SmmAllocatePool (EfiRuntimeServicesData, sizeof(EFI_SMM_DRIVER_ENTRY), (VOID **)&mSmmCoreDriverEntry);
+ ASSERT_EFI_ERROR(Status);
+
+ ZeroMem (mSmmCoreDriverEntry, sizeof(EFI_SMM_DRIVER_ENTRY));
+ //
+ // Fill in the remaining fields of the Loaded Image Protocol instance.
+ //
+ mSmmCoreDriverEntry->Signature = EFI_SMM_DRIVER_ENTRY_SIGNATURE;
+ mSmmCoreDriverEntry->SmmLoadedImage.Revision = EFI_LOADED_IMAGE_PROTOCOL_REVISION;
+ mSmmCoreDriverEntry->SmmLoadedImage.ParentHandle = gSmmCorePrivate->SmmIplImageHandle;
+ mSmmCoreDriverEntry->SmmLoadedImage.SystemTable = gST;
+
+ mSmmCoreDriverEntry->SmmLoadedImage.ImageBase = (VOID *)(UINTN)gSmmCorePrivate->PiSmmCoreImageBase;
+ mSmmCoreDriverEntry->SmmLoadedImage.ImageSize = gSmmCorePrivate->PiSmmCoreImageSize;
+ mSmmCoreDriverEntry->SmmLoadedImage.ImageCodeType = EfiRuntimeServicesCode;
+ mSmmCoreDriverEntry->SmmLoadedImage.ImageDataType = EfiRuntimeServicesData;
+
+ mSmmCoreDriverEntry->ImageEntryPoint = gSmmCorePrivate->PiSmmCoreEntryPoint;
+ mSmmCoreDriverEntry->ImageBuffer = gSmmCorePrivate->PiSmmCoreImageBase;
+ mSmmCoreDriverEntry->NumberOfPage = EFI_SIZE_TO_PAGES((UINTN)gSmmCorePrivate->PiSmmCoreImageSize);
+
+ //
+ // Create a new image handle in the SMM handle database for the SMM Driver
+ //
+ mSmmCoreDriverEntry->SmmImageHandle = NULL;
+ Status = SmmInstallProtocolInterface (
+ &mSmmCoreDriverEntry->SmmImageHandle,
+ &gEfiLoadedImageProtocolGuid,
+ EFI_NATIVE_INTERFACE,
+ &mSmmCoreDriverEntry->SmmLoadedImage
+ );
+ ASSERT_EFI_ERROR(Status);
+
return ;
}
@@ -636,5 +674,7 @@ SmmMain (
SmmCoreInstallLoadedImage ();
+ SmmCoreInitializeMemoryAttributesTable ();
+
return EFI_SUCCESS;
}
diff --git a/MdeModulePkg/Core/PiSmmCore/PiSmmCore.h b/MdeModulePkg/Core/PiSmmCore/PiSmmCore.h
index f46ee72..e2fee54 100644
--- a/MdeModulePkg/Core/PiSmmCore/PiSmmCore.h
+++ b/MdeModulePkg/Core/PiSmmCore/PiSmmCore.h
@@ -110,6 +110,8 @@ typedef struct {
// Image Page Number
//
UINTN NumberOfPage;
+ EFI_HANDLE SmmImageHandle;
+ EFI_LOADED_IMAGE_PROTOCOL SmmLoadedImage;
} EFI_SMM_DRIVER_ENTRY;
#define EFI_HANDLE_SIGNATURE SIGNATURE_32('h','n','d','l')
@@ -551,6 +553,38 @@ SmmLocateProtocol (
);
/**
+ Function returns an array of handles that support the requested protocol
+ in a buffer allocated from pool. This is a version of SmmLocateHandle()
+ that allocates a buffer for the caller.
+
+ @param SearchType Specifies which handle(s) are to be returned.
+ @param Protocol Provides the protocol to search by. This
+ parameter is only valid for SearchType
+ ByProtocol.
+ @param SearchKey Supplies the search key depending on the
+ SearchType.
+ @param NumberHandles The number of handles returned in Buffer.
+ @param Buffer A pointer to the buffer to return the requested
+ array of handles that support Protocol.
+
+ @retval EFI_SUCCESS The result array of handles was returned.
+ @retval EFI_NOT_FOUND No handles match the search.
+ @retval EFI_OUT_OF_RESOURCES There is not enough pool memory to store the
+ matching results.
+ @retval EFI_INVALID_PARAMETER One or more parameters are not valid.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmLocateHandleBuffer (
+ IN EFI_LOCATE_SEARCH_TYPE SearchType,
+ IN EFI_GUID *Protocol OPTIONAL,
+ IN VOID *SearchKey OPTIONAL,
+ IN OUT UINTN *NumberHandles,
+ OUT EFI_HANDLE **Buffer
+ );
+
+/**
Manage SMI of a particular type.
@param HandlerType Points to the handler type or NULL for root SMI handlers.
@@ -980,9 +1014,66 @@ SmramProfileReadyToLock (
VOID
);
+/**
+ Initialize MemoryAttributes support.
+**/
+VOID
+EFIAPI
+SmmCoreInitializeMemoryAttributesTable (
+ VOID
+ );
+
+/**
+ This function returns a copy of the current memory map. The map is an array of
+ memory descriptors, each of which describes a contiguous block of memory.
+
+ @param[in, out] MemoryMapSize A pointer to the size, in bytes, of the
+ MemoryMap buffer. On input, this is the size of
+ the buffer allocated by the caller. On output,
+ it is the size of the buffer returned by the
+ firmware if the buffer was large enough, or the
+ size of the buffer needed to contain the map if
+ the buffer was too small.
+ @param[in, out] MemoryMap A pointer to the buffer in which firmware places
+ the current memory map.
+ @param[out] MapKey A pointer to the location in which firmware
+ returns the key for the current memory map.
+ @param[out] DescriptorSize A pointer to the location in which firmware
+ returns the size, in bytes, of an individual
+ EFI_MEMORY_DESCRIPTOR.
+ @param[out] DescriptorVersion A pointer to the location in which firmware
+ returns the version number associated with the
+ EFI_MEMORY_DESCRIPTOR.
+
+ @retval EFI_SUCCESS The memory map was returned in the MemoryMap
+ buffer.
+ @retval EFI_BUFFER_TOO_SMALL The MemoryMap buffer was too small. The current
+ buffer size needed to hold the memory map is
+ returned in MemoryMapSize.
+ @retval EFI_INVALID_PARAMETER One of the parameters has an invalid value.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmCoreGetMemoryMap (
+ IN OUT UINTN *MemoryMapSize,
+ IN OUT EFI_MEMORY_DESCRIPTOR *MemoryMap,
+ OUT UINTN *MapKey,
+ OUT UINTN *DescriptorSize,
+ OUT UINT32 *DescriptorVersion
+ );
+
+///
+/// For generic EFI machines make the default allocations 4K aligned
+///
+#define EFI_ACPI_RUNTIME_PAGE_ALLOCATION_ALIGNMENT (EFI_PAGE_SIZE)
+#define DEFAULT_PAGE_ALLOCATION (EFI_PAGE_SIZE)
+
extern UINTN mFullSmramRangeCount;
extern EFI_SMRAM_DESCRIPTOR *mFullSmramRanges;
+extern EFI_SMM_DRIVER_ENTRY *mSmmCoreDriverEntry;
+
extern EFI_LOADED_IMAGE_PROTOCOL *mSmmCoreLoadedImage;
//
diff --git a/MdeModulePkg/Core/PiSmmCore/PiSmmCore.inf b/MdeModulePkg/Core/PiSmmCore/PiSmmCore.inf
index 1f73cbb..c256e90 100644
--- a/MdeModulePkg/Core/PiSmmCore/PiSmmCore.inf
+++ b/MdeModulePkg/Core/PiSmmCore/PiSmmCore.inf
@@ -38,6 +38,7 @@
Smi.c
InstallConfigurationTable.c
SmramProfileRecord.c
+ MemoryAttributesTable.c
[Packages]
MdePkg/MdePkg.dec
@@ -96,6 +97,7 @@
gEdkiiMemoryProfileGuid
## SOMETIMES_PRODUCES ## GUID # Install protocol
gEdkiiSmmMemoryProfileGuid
+ gEdkiiPiSmmMemoryAttributesTableGuid ## SOMETIMES_PRODUCES ## SystemTable
[UserExtensions.TianoCore."ExtraFiles"]
PiSmmCoreExtra.uni
diff --git a/MdeModulePkg/Core/PiSmmCore/Pool.c b/MdeModulePkg/Core/PiSmmCore/Pool.c
index 02dab01..dcfd13e 100644
--- a/MdeModulePkg/Core/PiSmmCore/Pool.c
+++ b/MdeModulePkg/Core/PiSmmCore/Pool.c
@@ -86,8 +86,24 @@ SmmInitializeMemoryServices (
}
//
// Initialize free SMRAM regions
+ // Need to add free memory first, to let gSmmMemoryMap record the data
//
for (Index = 0; Index < SmramRangeCount; Index++) {
+ if ((SmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) {
+ continue;
+ }
+ SmmAddMemoryRegion (
+ SmramRanges[Index].CpuStart,
+ SmramRanges[Index].PhysicalSize,
+ EfiConventionalMemory,
+ SmramRanges[Index].RegionState
+ );
+ }
+
+ for (Index = 0; Index < SmramRangeCount; Index++) {
+ if ((SmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) == 0) {
+ continue;
+ }
SmmAddMemoryRegion (
SmramRanges[Index].CpuStart,
SmramRanges[Index].PhysicalSize,
--
2.7.4.windows.1
^ permalink raw reply related [flat|nested] 21+ messages in thread
* [PATCH V3 5/6] UefiCpuPkg/PiSmmCpuDxeSmm: Add paging protection.
2016-11-11 9:00 [PATCH V3 0/6] Enable SMM page level protection Jiewen Yao
` (3 preceding siblings ...)
2016-11-11 9:00 ` [PATCH V3 4/6] UefiCpuPkg/dec: Add PcdCpuSmmStaticPageTable Jiewen Yao
@ 2016-11-11 9:00 ` Jiewen Yao
2016-11-11 9:00 ` [PATCH V3 6/6] QuarkPlatformPkg/dsc: enable Smm " Jiewen Yao
` (2 subsequent siblings)
7 siblings, 0 replies; 21+ messages in thread
From: Jiewen Yao @ 2016-11-11 9:00 UTC (permalink / raw)
To: edk2-devel
Cc: Jeff Fan, Feng Tian, Star Zeng, Michael D Kinney, Laszlo Ersek,
Paolo Bonzini
PiSmmCpuDxeSmm consumes the SmmAttributesTable and sets up the page table:
1) The code region is marked as read-only and the data region as
non-executable, if the PE image is 4K aligned.
2) Important data structures, such as the GDT/IDT, are set to RO.
3) SmmSaveState is set to non-executable,
and SmmEntrypoint is set to read-only.
4) If static paging is supported, the page table itself is read-only.
We use the page table to protect the other components, and the page
table itself.
If we use dynamic paging, we can still provide *partial* protection,
and hope the page table is not modified by other components.
The XD enabling code is moved to SmiEntry to let NX take effect.
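As an illustration of the intent only (not part of this patch), a minimal sketch of how one 4K-aligned image's sections could be protected with the SmmSetMemoryAttributesEx() primitive shown in the PageTbl.c hunk below; the function ProtectImageRangeSketch and its parameters are hypothetical placeholders for the per-image ranges read from the memory attributes table:
/**
  Illustrative sketch only: mark one image's code section read-only and its
  data section non-executable. Assumes PiSmmCpuDxeSmm.h for the types and
  the SmmSetMemoryAttributesEx() primitive added by this patch.
  CodeBase/CodeSize/DataBase/DataSize are hypothetical; in practice they
  come from the PiSmmMemoryAttributesTable entries.
**/
VOID
ProtectImageRangeSketch (
  IN EFI_PHYSICAL_ADDRESS  CodeBase,
  IN UINT64                CodeSize,
  IN EFI_PHYSICAL_ADDRESS  DataBase,
  IN UINT64                DataSize
  )
{
  BOOLEAN  IsSplitted;

  //
  // Code section: read-only, still executable.
  //
  SmmSetMemoryAttributesEx (CodeBase, CodeSize, EFI_MEMORY_RO, &IsSplitted);
  //
  // Data section: non-executable, still writable.
  //
  SmmSetMemoryAttributesEx (DataBase, DataSize, EFI_MEMORY_XP, &IsSplitted);
}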
Cc: Jeff Fan <jeff.fan@intel.com>
Cc: Feng Tian <feng.tian@intel.com>
Cc: Star Zeng <star.zeng@intel.com>
Cc: Michael D Kinney <michael.d.kinney@intel.com>
Cc: Laszlo Ersek <lersek@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Jiewen Yao <jiewen.yao@intel.com>
---
UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c | 71 +-
UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.S | 75 +-
UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.asm | 75 +-
UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.nasm | 79 +-
UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.S | 226 +----
UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.asm | 36 +-
UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.nasm | 36 +-
UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c | 37 +-
UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c | 4 +-
UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c | 135 ++-
UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c | 144 +++-
UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h | 156 +++-
UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf | 5 +-
UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c | 871 ++++++++++++++++++++
UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c | 39 +-
UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.h | 15 +-
UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c | 274 +++++-
UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.S | 59 +-
UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.asm | 62 +-
UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm | 69 +-
UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.S | 250 +-----
UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.asm | 35 +-
UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm | 31 +-
UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c | 30 +-
UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c | 7 +-
25 files changed, 2044 insertions(+), 777 deletions(-)
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
index a871bef..65f09e5 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
@@ -58,7 +58,7 @@ SmmInitPageTable (
if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
InitializeIDTSmmStackGuard ();
}
- return Gen4GPageTable (0, TRUE);
+ return Gen4GPageTable (TRUE);
}
/**
@@ -99,7 +99,7 @@ SmiPFHandler (
if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
(PFAddress >= mCpuHotPlugData.SmrrBase) &&
(PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
- DEBUG ((EFI_D_ERROR, "SMM stack overflow!\n"));
+ DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
CpuDeadLoop ();
}
@@ -109,7 +109,7 @@ SmiPFHandler (
if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
(PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
if ((SystemContext.SystemContextIa32->ExceptionData & IA32_PF_EC_ID) != 0) {
- DEBUG ((EFI_D_ERROR, "Code executed on IP(0x%x) out of SMM range after SMM is locked!\n", PFAddress));
+ DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%x) out of SMM range after SMM is locked!\n", PFAddress));
DEBUG_CODE (
DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextIa32->Esp);
);
@@ -128,3 +128,68 @@ SmiPFHandler (
ReleaseSpinLock (mPFLock);
}
+
+/**
+ This function sets memory attribute for page table.
+**/
+VOID
+SetPageTableAttributes (
+ VOID
+ )
+{
+ UINTN Index2;
+ UINTN Index3;
+ UINT64 *L1PageTable;
+ UINT64 *L2PageTable;
+ UINT64 *L3PageTable;
+ BOOLEAN IsSplitted;
+ BOOLEAN PageTableSplitted;
+
+ DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));
+
+ //
+ // Disable write protection, because we need to mark the page table as write protected.
+ // We need to *write* page table memory in order to mark it *read only*.
+ //
+ AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);
+
+ do {
+ DEBUG ((DEBUG_INFO, "Start...\n"));
+ PageTableSplitted = FALSE;
+
+ L3PageTable = (UINT64 *)GetPageTableBase ();
+
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+
+ for (Index3 = 0; Index3 < 4; Index3++) {
+ L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & PAGING_4K_ADDRESS_MASK_64);
+ if (L2PageTable == NULL) {
+ continue;
+ }
+
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+
+ for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
+ if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
+ // 2M
+ continue;
+ }
+ L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & PAGING_4K_ADDRESS_MASK_64);
+ if (L1PageTable == NULL) {
+ continue;
+ }
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+ }
+ }
+ } while (PageTableSplitted);
+
+ //
+ // Enable write protection, after page table updated.
+ //
+ AsmWriteCr0 (AsmReadCr0() | CR0_WP);
+
+ return ;
+}
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.S b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.S
index ec5b9a0..0c07558 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.S
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.S
@@ -1,6 +1,6 @@
#------------------------------------------------------------------------------
#
-# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
@@ -24,9 +24,13 @@ ASM_GLOBAL ASM_PFX(gcSmiHandlerSize)
ASM_GLOBAL ASM_PFX(gSmiCr3)
ASM_GLOBAL ASM_PFX(gSmiStack)
ASM_GLOBAL ASM_PFX(gSmbase)
+ASM_GLOBAL ASM_PFX(mXdSupported)
ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
ASM_GLOBAL ASM_PFX(gSmiHandlerIdtr)
+.equ MSR_EFER, 0xc0000080
+.equ MSR_EFER_XD, 0x800
+
.equ DSC_OFFSET, 0xfb00
.equ DSC_GDTPTR, 0x30
.equ DSC_GDTSIZ, 0x38
@@ -122,8 +126,41 @@ L11:
orl $BIT10, %eax
L12: # as cr4.PGE is not set here, refresh cr3
movl %eax, %cr4 # in PreModifyMtrrs() to flush TLB.
+
+ cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
+ jz L5
+# Load TSS
+ movb $0x89, (TSS_SEGMENT + 5)(%ebp) # clear busy flag
+ movl $TSS_SEGMENT, %eax
+ ltrw %ax
+L5:
+
+# enable NXE if supported
+ .byte 0xb0 # mov al, imm8
+ASM_PFX(mXdSupported): .byte 1
+ cmpb $0, %al
+ jz SkipNxe
+#
+# Check XD disable bit
+#
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ rdmsr
+ pushl %edx # save MSR_IA32_MISC_ENABLE[63-32]
+ testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34]
+ jz L13
+ andw $0x0FFFB, %dx # clear XD Disable bit if it is set
+ wrmsr
+L13:
+ movl $MSR_EFER, %ecx
+ rdmsr
+ orw $MSR_EFER_XD,%ax # enable NXE
+ wrmsr
+SkipNxe:
+ subl $4, %esp
+NxeDone:
+
movl %cr0, %ebx
- orl $0x080010000, %ebx # enable paging + WP
+ orl $0x080010023, %ebx # enable paging + WP + NE + MP + PE
movl %ebx, %cr0
leal DSC_OFFSET(%edi),%ebx
movw DSC_DS(%ebx),%ax
@@ -135,35 +172,39 @@ L12: # as cr4.PGE is not set here, refresh
movw DSC_SS(%ebx),%ax
movl %eax, %ss
- cmpb $0, ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
- jz L5
-
-# Load TSS
- movb $0x89, (TSS_SEGMENT + 5)(%ebp) # clear busy flag
- movl $TSS_SEGMENT, %eax
- ltrw %ax
-L5:
-
# jmp _SmiHandler # instruction is not needed
_SmiHandler:
- movl (%esp), %ebx
+ movl 4(%esp), %ebx
pushl %ebx
movl $ASM_PFX(CpuSmmDebugEntry), %eax
call *%eax
- popl %ecx
-
+ addl $4, %esp
+
pushl %ebx
movl $ASM_PFX(SmiRendezvous), %eax
call *%eax
- popl %ecx
+ addl $4, %esp
pushl %ebx
movl $ASM_PFX(CpuSmmDebugExit), %eax
call *%eax
- popl %ecx
-
+ addl $4, %esp
+
+ movl $ASM_PFX(mXdSupported), %eax
+ movb (%eax), %al
+ cmpb $0, %al
+ jz L16
+ popl %edx # get saved MSR_IA32_MISC_ENABLE[63-32]
+ testl $BIT2, %edx
+ jz L16
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ rdmsr
+ orw $BIT2, %dx # set XD Disable bit if it was set before entering into SMM
+ wrmsr
+
+L16:
rsm
ASM_PFX(gcSmiHandlerSize): .word . - _SmiEntryPoint
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.asm b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.asm
index ac1a9b4..eda1708 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.asm
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.asm
@@ -1,5 +1,5 @@
;------------------------------------------------------------------------------ ;
-; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at
@@ -22,6 +22,10 @@
.model flat,C
.xmm
+MSR_IA32_MISC_ENABLE EQU 1A0h
+MSR_EFER EQU 0c0000080h
+MSR_EFER_XD EQU 0800h
+
DSC_OFFSET EQU 0fb00h
DSC_GDTPTR EQU 30h
DSC_GDTSIZ EQU 38h
@@ -43,6 +47,7 @@ EXTERNDEF gcSmiHandlerSize:WORD
EXTERNDEF gSmiCr3:DWORD
EXTERNDEF gSmiStack:DWORD
EXTERNDEF gSmbase:DWORD
+EXTERNDEF mXdSupported:BYTE
EXTERNDEF FeaturePcdGet (PcdCpuSmmStackGuard):BYTE
EXTERNDEF gSmiHandlerIdtr:FWORD
@@ -128,8 +133,42 @@ gSmiCr3 DD ?
or eax, BIT10
@@: ; as cr4.PGE is not set here, refresh cr3
mov cr4, eax ; in PreModifyMtrrs() to flush TLB.
+
+ cmp FeaturePcdGet (PcdCpuSmmStackGuard), 0
+ jz @F
+; Load TSS
+ mov byte ptr [ebp + TSS_SEGMENT + 5], 89h ; clear busy flag
+ mov eax, TSS_SEGMENT
+ ltr ax
+@@:
+
+; enable NXE if supported
+ DB 0b0h ; mov al, imm8
+mXdSupported DB 1
+ cmp al, 0
+ jz @SkipXd
+;
+; Check XD disable bit
+;
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ push edx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz @f
+ and dx, 0FFFBh ; clear XD Disable bit if it is set
+ wrmsr
+@@:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+ jmp @XdDone
+@SkipXd:
+ sub esp, 4
+@XdDone:
+
mov ebx, cr0
- or ebx, 080010000h ; enable paging + WP
+ or ebx, 080010023h ; enable paging + WP + NE + MP + PE
mov cr0, ebx
lea ebx, [edi + DSC_OFFSET]
mov ax, [ebx + DSC_DS]
@@ -141,34 +180,38 @@ gSmiCr3 DD ?
mov ax, [ebx + DSC_SS]
mov ss, eax
- cmp FeaturePcdGet (PcdCpuSmmStackGuard), 0
- jz @F
-
-; Load TSS
- mov byte ptr [ebp + TSS_SEGMENT + 5], 89h ; clear busy flag
- mov eax, TSS_SEGMENT
- ltr ax
-@@:
; jmp _SmiHandler ; instruction is not needed
_SmiHandler PROC
- mov ebx, [esp] ; CPU Index
-
+ mov ebx, [esp + 4] ; CPU Index
push ebx
mov eax, CpuSmmDebugEntry
call eax
- pop ecx
+ add esp, 4
push ebx
mov eax, SmiRendezvous
call eax
- pop ecx
-
+ add esp, 4
+
push ebx
mov eax, CpuSmmDebugExit
call eax
- pop ecx
+ add esp, 4
+ mov eax, mXdSupported
+ mov al, [eax]
+ cmp al, 0
+ jz @f
+ pop edx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz @f
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
+ wrmsr
+
+@@:
rsm
_SmiHandler ENDP
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.nasm
index 4fb0c13..d50a317 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.nasm
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.nasm
@@ -18,6 +18,10 @@
;
;-------------------------------------------------------------------------------
+%define MSR_IA32_MISC_ENABLE 0x1A0
+%define MSR_EFER 0xc0000080
+%define MSR_EFER_XD 0x800
+
%define DSC_OFFSET 0xfb00
%define DSC_GDTPTR 0x30
%define DSC_GDTSIZ 0x38
@@ -40,6 +44,7 @@ global ASM_PFX(gcSmiHandlerSize)
global ASM_PFX(gSmiCr3)
global ASM_PFX(gSmiStack)
global ASM_PFX(gSmbase)
+global ASM_PFX(mXdSupported)
extern ASM_PFX(gSmiHandlerIdtr)
SECTION .text
@@ -56,7 +61,7 @@ _SmiEntryPoint:
mov ebp, eax ; ebp = GDT base
o32 lgdt [cs:bx] ; lgdt fword ptr cs:[bx]
mov ax, PROTECT_MODE_CS
- mov [cs:bx-0x2],ax
+ mov [cs:bx-0x2],ax
DB 0x66, 0xbf ; mov edi, SMBASE
ASM_PFX(gSmbase): DD 0
lea eax, [edi + (@32bit - _SmiEntryPoint) + 0x8000]
@@ -66,7 +71,7 @@ ASM_PFX(gSmbase): DD 0
or ebx, 0x23
mov cr0, ebx
jmp dword 0x0:0x0
-_GdtDesc:
+_GdtDesc:
DW 0
DD 0
@@ -115,8 +120,42 @@ ASM_PFX(gSmiCr3): DD 0
or eax, BIT10
.4: ; as cr4.PGE is not set here, refresh cr3
mov cr4, eax ; in PreModifyMtrrs() to flush TLB.
+
+ cmp byte [dword ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))], 0
+ jz .6
+; Load TSS
+ mov byte [ebp + TSS_SEGMENT + 5], 0x89 ; clear busy flag
+ mov eax, TSS_SEGMENT
+ ltr ax
+.6:
+
+; enable NXE if supported
+ DB 0b0h ; mov al, imm8
+ASM_PFX(mXdSupported): DB 1
+ cmp al, 0
+ jz @SkipXd
+;
+; Check XD disable bit
+;
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ push edx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz .5
+ and dx, 0xFFFB ; clear XD Disable bit if it is set
+ wrmsr
+.5:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+ jmp @XdDone
+@SkipXd:
+ sub esp, 4
+@XdDone:
+
mov ebx, cr0
- or ebx, 0x080010000 ; enable paging + WP
+ or ebx, 0x80010023 ; enable paging + WP + NE + MP + PE
mov cr0, ebx
lea ebx, [edi + DSC_OFFSET]
mov ax, [ebx + DSC_DS]
@@ -128,35 +167,39 @@ ASM_PFX(gSmiCr3): DD 0
mov ax, [ebx + DSC_SS]
mov ss, eax
- cmp byte [dword ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))], 0
- jz .5
-
-; Load TSS
- mov byte [ebp + TSS_SEGMENT + 5], 0x89 ; clear busy flag
- mov eax, TSS_SEGMENT
- ltr ax
-.5:
; jmp _SmiHandler ; instruction is not needed
global ASM_PFX(SmiHandler)
ASM_PFX(SmiHandler):
- mov ebx, [esp] ; CPU Index
-
+ mov ebx, [esp + 4] ; CPU Index
push ebx
mov eax, ASM_PFX(CpuSmmDebugEntry)
call eax
- pop ecx
+ add esp, 4
push ebx
mov eax, ASM_PFX(SmiRendezvous)
call eax
- pop ecx
-
+ add esp, 4
+
push ebx
mov eax, ASM_PFX(CpuSmmDebugExit)
call eax
- pop ecx
-
+ add esp, 4
+
+ mov eax, ASM_PFX(mXdSupported)
+ mov al, [eax]
+ cmp al, 0
+ jz .7
+ pop edx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz .7
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
+ wrmsr
+
+.7:
rsm
ASM_PFX(gcSmiHandlerSize): DW $ - _SmiEntryPoint
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.S b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.S
index 4130bf5..cf5ef82 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.S
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.S
@@ -1,6 +1,6 @@
#------------------------------------------------------------------------------
#
-# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
@@ -24,6 +24,7 @@ ASM_GLOBAL ASM_PFX(PageFaultStubFunction)
ASM_GLOBAL ASM_PFX(gSmiMtrrs)
ASM_GLOBAL ASM_PFX(gcSmiIdtr)
ASM_GLOBAL ASM_PFX(gcSmiGdtr)
+ASM_GLOBAL ASM_PFX(gTaskGateDescriptor)
ASM_GLOBAL ASM_PFX(gcPsd)
ASM_GLOBAL ASM_PFX(FeaturePcdGet (PcdCpuSmmProfileEnable))
@@ -236,207 +237,10 @@ ASM_PFX(gcPsd):
ASM_PFX(gcSmiGdtr): .word GDT_SIZE - 1
.long NullSeg
-ASM_PFX(gcSmiIdtr): .word IDT_SIZE - 1
- .long _SmiIDT
-
-_SmiIDT:
-# The following segment repeats 32 times:
-# No. 1
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 2
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 3
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 4
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 5
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 6
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 7
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 8
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 9
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 10
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 11
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 12
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 13
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 14
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 15
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 16
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 17
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 18
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 19
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 20
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 21
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 22
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 23
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 24
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 25
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 26
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 27
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 28
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 29
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 30
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 31
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-# No. 32
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
-
-.equ IDT_SIZE, . - _SmiIDT
-
-TaskGateDescriptor:
+ASM_PFX(gcSmiIdtr): .word 0
+ .long 0
+
+ASM_PFX(gTaskGateDescriptor):
.word 0 # Reserved
.word EXCEPTION_TSS_SEL # TSS Segment selector
.byte 0 # Reserved
@@ -891,21 +695,3 @@ ASM_PFX(PageFaultStubFunction):
#
clts
iret
-
-ASM_GLOBAL ASM_PFX(InitializeIDTSmmStackGuard)
-ASM_PFX(InitializeIDTSmmStackGuard):
- pushl %ebx
-#
-# If SMM Stack Guard feature is enabled, the Page Fault Exception entry in IDT
-# is a Task Gate Descriptor so that when a Page Fault Exception occurs,
-# the processors can use a known good stack in case stack ran out.
-#
- leal _SmiIDT + 14 * 8, %ebx
- leal TaskGateDescriptor, %edx
- movl (%edx), %eax
- movl %eax, (%ebx)
- movl 4(%edx), %eax
- movl %eax, 4(%ebx)
-
- popl %ebx
- ret
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.asm b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.asm
index b4eb492..7b162f8 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.asm
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.asm
@@ -1,5 +1,5 @@
;------------------------------------------------------------------------------ ;
-; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at
@@ -26,6 +26,7 @@ EXTERNDEF PageFaultStubFunction:PROC
EXTERNDEF gSmiMtrrs:QWORD
EXTERNDEF gcSmiIdtr:FWORD
EXTERNDEF gcSmiGdtr:FWORD
+EXTERNDEF gTaskGateDescriptor:QWORD
EXTERNDEF gcPsd:BYTE
EXTERNDEF FeaturePcdGet (PcdCpuSmmProfileEnable):BYTE
@@ -252,20 +253,10 @@ gcSmiGdtr LABEL FWORD
DD offset NullSeg
gcSmiIdtr LABEL FWORD
- DW IDT_SIZE - 1
- DD offset _SmiIDT
-
-_SmiIDT LABEL QWORD
-REPEAT 32
- DW 0 ; Offset 0:15
- DW CODE_SEL ; Segment selector
- DB 0 ; Unused
- DB 8eh ; Interrupt Gate, Present
- DW 0 ; Offset 16:31
- ENDM
-IDT_SIZE = $ - offset _SmiIDT
-
-TaskGateDescriptor LABEL DWORD
+ DW 0
+ DD 0
+
+gTaskGateDescriptor LABEL QWORD
DW 0 ; Reserved
DW EXCEPTION_TSS_SEL ; TSS Segment selector
DB 0 ; Reserved
@@ -720,19 +711,4 @@ PageFaultStubFunction PROC
iretd
PageFaultStubFunction ENDP
-InitializeIDTSmmStackGuard PROC USES ebx
-;
-; If SMM Stack Guard feature is enabled, the Page Fault Exception entry in IDT
-; is a Task Gate Descriptor so that when a Page Fault Exception occurs,
-; the processors can use a known good stack in case stack is ran out.
-;
- lea ebx, _SmiIDT + 14 * 8
- lea edx, TaskGateDescriptor
- mov eax, [edx]
- mov [ebx], eax
- mov eax, [edx + 4]
- mov [ebx + 4], eax
- ret
-InitializeIDTSmmStackGuard ENDP
-
END
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.nasm
index 6a32828..4d58999 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.nasm
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.nasm
@@ -1,5 +1,5 @@
;------------------------------------------------------------------------------ ;
-; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at
@@ -24,6 +24,7 @@ extern ASM_PFX(SmiPFHandler)
global ASM_PFX(gcSmiIdtr)
global ASM_PFX(gcSmiGdtr)
+global ASM_PFX(gTaskGateDescriptor)
global ASM_PFX(gcPsd)
SECTION .data
@@ -250,21 +251,10 @@ ASM_PFX(gcSmiGdtr):
DD NullSeg
ASM_PFX(gcSmiIdtr):
- DW IDT_SIZE - 1
- DD _SmiIDT
+ DW 0
+ DD 0
-_SmiIDT:
-%rep 32
- DW 0 ; Offset 0:15
- DW CODE_SEL ; Segment selector
- DB 0 ; Unused
- DB 0x8e ; Interrupt Gate, Present
- DW 0 ; Offset 16:31
-%endrep
-
-IDT_SIZE equ $ - _SmiIDT
-
-TaskGateDescriptor:
+ASM_PFX(gTaskGateDescriptor):
DW 0 ; Reserved
DW EXCEPTION_TSS_SEL ; TSS Segment selector
DB 0 ; Reserved
@@ -717,19 +707,3 @@ ASM_PFX(PageFaultStubFunction):
clts
iretd
-global ASM_PFX(InitializeIDTSmmStackGuard)
-ASM_PFX(InitializeIDTSmmStackGuard):
- push ebx
-;
-; If SMM Stack Guard feature is enabled, the Page Fault Exception entry in IDT
-; is a Task Gate Descriptor so that when a Page Fault Exception occurrs,
-; the processors can use a known good stack in case stack is ran out.
-;
- lea ebx, [_SmiIDT + 14 * 8]
- lea edx, [TaskGateDescriptor]
- mov eax, [edx]
- mov [ebx], eax
- mov eax, [edx + 4]
- mov [ebx + 4], eax
- pop ebx
- ret
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c
index 545b534..e87bf7b 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c
@@ -1,7 +1,7 @@
/** @file
SMM CPU misc functions for Ia32 arch specific.
-Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2015 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
@@ -14,6 +14,33 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#include "PiSmmCpuDxeSmm.h"
+extern UINT64 gTaskGateDescriptor;
+
+EFI_PHYSICAL_ADDRESS mGdtBuffer;
+UINTN mGdtBufferSize;
+
+/**
+ Initialize IDT for SMM Stack Guard.
+
+**/
+VOID
+EFIAPI
+InitializeIDTSmmStackGuard (
+ VOID
+ )
+{
+ IA32_IDT_GATE_DESCRIPTOR *IdtGate;
+
+ //
+ // If SMM Stack Guard feature is enabled, the Page Fault Exception entry in IDT
+ // is a Task Gate Descriptor so that when a Page Fault Exception occurs,
+ // the processors can use a known good stack in case the stack runs out.
+ //
+ IdtGate = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
+ IdtGate += EXCEPT_IA32_PAGE_FAULT;
+ IdtGate->Uint64 = gTaskGateDescriptor;
+}
+
/**
Initialize Gdt for all processors.
@@ -49,8 +76,10 @@ InitGdt (
gcSmiGdtr.Limit += (UINT16)(2 * sizeof (IA32_SEGMENT_DESCRIPTOR));
GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE * 2 + 7) & ~7; // 8 bytes aligned
- GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
+ mGdtBufferSize = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
+ GdtTssTables = (UINT8*)AllocateCodePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
ASSERT (GdtTssTables != NULL);
+ mGdtBuffer = (UINTN)GdtTssTables;
GdtTableStepSize = GdtTssTableSize;
for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
@@ -82,8 +111,10 @@ InitGdt (
// Just use original table, AllocatePage and copy them here to make sure GDTs are covered in page memory.
//
GdtTssTableSize = gcSmiGdtr.Limit + 1;
- GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
+ mGdtBufferSize = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
+ GdtTssTables = (UINT8*)AllocateCodePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
ASSERT (GdtTssTables != NULL);
+ mGdtBuffer = (UINTN)GdtTssTables;
GdtTableStepSize = GdtTssTableSize;
for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c
index 767cb69..724cd92 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c
@@ -1,7 +1,7 @@
/** @file
IA-32 processor specific functions to enable SMM profile.
-Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2012 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
@@ -24,7 +24,7 @@ InitSmmS3Cr3 (
VOID
)
{
- mSmmS3ResumeState->SmmS3Cr3 = Gen4GPageTable (0, TRUE);
+ mSmmS3ResumeState->SmmS3Cr3 = Gen4GPageTable (TRUE);
return ;
}
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
index 12466ef..9b8db90 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
@@ -734,14 +734,12 @@ APHandler (
/**
Create 4G PageTable in SMRAM.
- @param ExtraPages Additional page numbers besides for 4G memory
- @param Is32BitPageTable Whether the page table is 32-bit PAE
+ @param[in] Is32BitPageTable Whether the page table is 32-bit PAE
@return PageTable Address
**/
UINT32
Gen4GPageTable (
- IN UINTN ExtraPages,
IN BOOLEAN Is32BitPageTable
)
{
@@ -775,10 +773,10 @@ Gen4GPageTable (
//
// Allocate the page table
//
- PageTable = AllocatePageTableMemory (ExtraPages + 5 + PagesNeeded);
+ PageTable = AllocatePageTableMemory (5 + PagesNeeded);
ASSERT (PageTable != NULL);
- PageTable = (VOID *)((UINTN)PageTable + EFI_PAGES_TO_SIZE (ExtraPages));
+ PageTable = (VOID *)((UINTN)PageTable);
Pte = (UINT64*)PageTable;
//
@@ -903,13 +901,13 @@ SetCacheability (
PageTable[PTIndex] |= (UINT64)Cacheability;
}
-
/**
Schedule a procedure to run on the specified CPU.
- @param Procedure The address of the procedure to run
- @param CpuIndex Target CPU Index
- @param ProcArguments The parameter to pass to the procedure
+ @param[in] Procedure The address of the procedure to run
+ @param[in] CpuIndex Target CPU Index
+ @param[in, out] ProcArguments The parameter to pass to the procedure
+ @param[in] BlockingMode Whether to start up the AP in blocking mode
@retval EFI_INVALID_PARAMETER CpuNumber not valid
@retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
@@ -919,26 +917,48 @@ SetCacheability (
**/
EFI_STATUS
-EFIAPI
-SmmStartupThisAp (
+InternalSmmStartupThisAp (
IN EFI_AP_PROCEDURE Procedure,
IN UINTN CpuIndex,
- IN OUT VOID *ProcArguments OPTIONAL
+ IN OUT VOID *ProcArguments OPTIONAL,
+ IN BOOLEAN BlockingMode
)
{
- if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus ||
- CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu ||
- !(*(mSmmMpSyncData->CpuData[CpuIndex].Present)) ||
- gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove ||
- !AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
+ if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus) {
+ DEBUG((DEBUG_ERROR, "CpuIndex(%d) >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus(%d)\n", CpuIndex, gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
+ return EFI_INVALID_PARAMETER;
+ }
+ if (CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) {
+ DEBUG((DEBUG_ERROR, "CpuIndex(%d) == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu\n", CpuIndex));
return EFI_INVALID_PARAMETER;
}
+ if (!(*(mSmmMpSyncData->CpuData[CpuIndex].Present))) {
+ if (mSmmMpSyncData->EffectiveSyncMode == SmmCpuSyncModeTradition) {
+ DEBUG((DEBUG_ERROR, "!mSmmMpSyncData->CpuData[%d].Present\n", CpuIndex));
+ }
+ return EFI_INVALID_PARAMETER;
+ }
+ if (gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove) {
+ if (!FeaturePcdGet (PcdCpuHotPlugSupport)) {
+ DEBUG((DEBUG_ERROR, "gSmmCpuPrivate->Operation[%d] == SmmCpuRemove\n", CpuIndex));
+ }
+ return EFI_INVALID_PARAMETER;
+ }
+
+ if (BlockingMode) {
+ AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
+ } else {
+ if (!AcquireSpinLockOrFail (mSmmMpSyncData->CpuData[CpuIndex].Busy)) {
+ DEBUG((DEBUG_ERROR, "mSmmMpSyncData->CpuData[%d].Busy\n", CpuIndex));
+ return EFI_INVALID_PARAMETER;
+ }
+ }
mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure;
mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments;
ReleaseSemaphore (mSmmMpSyncData->CpuData[CpuIndex].Run);
- if (FeaturePcdGet (PcdCpuSmmBlockStartupThisAp)) {
+ if (BlockingMode) {
AcquireSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
ReleaseSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
}
@@ -946,6 +966,56 @@ SmmStartupThisAp (
}
/**
+ Schedule a procedure to run on the specified CPU in blocking mode.
+
+ @param[in] Procedure The address of the procedure to run
+ @param[in] CpuIndex Target CPU Index
+ @param[in, out] ProcArguments The parameter to pass to the procedure
+
+ @retval EFI_INVALID_PARAMETER CpuNumber not valid
+ @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
+ @retval EFI_SUCCESS The procedure has been successfully scheduled
+
+**/
+EFI_STATUS
+EFIAPI
+SmmBlockingStartupThisAp (
+ IN EFI_AP_PROCEDURE Procedure,
+ IN UINTN CpuIndex,
+ IN OUT VOID *ProcArguments OPTIONAL
+ )
+{
+ return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, TRUE);
+}
+
+/**
+ Schedule a procedure to run on the specified CPU.
+
+ @param Procedure The address of the procedure to run
+ @param CpuIndex Target CPU Index
+ @param ProcArguments The parameter to pass to the procedure
+
+ @retval EFI_INVALID_PARAMETER CpuNumber not valid
+ @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM
+ @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy
+ @retval EFI_SUCCESS The procedure has been successfully scheduled
+
+**/
+EFI_STATUS
+EFIAPI
+SmmStartupThisAp (
+ IN EFI_AP_PROCEDURE Procedure,
+ IN UINTN CpuIndex,
+ IN OUT VOID *ProcArguments OPTIONAL
+ )
+{
+ return InternalSmmStartupThisAp(Procedure, CpuIndex, ProcArguments, FeaturePcdGet (PcdCpuSmmBlockStartupThisAp));
+}
+
+/**
This function sets DR6 & DR7 according to SMM save state, before running SMM C code.
They are useful when you want to enable hardware breakpoints in SMM without entry SMM mode.
@@ -964,6 +1034,7 @@ CpuSmmDebugEntry (
SMRAM_SAVE_STATE_MAP *CpuSaveState;
if (FeaturePcdGet (PcdCpuSmmDebug)) {
+ ASSERT(CpuIndex < mMaxNumberOfCpus);
CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
AsmWriteDr6 (CpuSaveState->x86._DR6);
@@ -993,6 +1064,7 @@ CpuSmmDebugExit (
SMRAM_SAVE_STATE_MAP *CpuSaveState;
if (FeaturePcdGet (PcdCpuSmmDebug)) {
+ ASSERT(CpuIndex < mMaxNumberOfCpus);
CpuSaveState = (SMRAM_SAVE_STATE_MAP *)gSmmCpuPrivate->CpuSaveState[CpuIndex];
if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
CpuSaveState->x86._DR7 = (UINT32)AsmReadDr7 ();
@@ -1022,8 +1094,8 @@ SmiRendezvous (
BOOLEAN BspInProgress;
UINTN Index;
UINTN Cr2;
- BOOLEAN XdDisableFlag;
- MSR_IA32_MISC_ENABLE_REGISTER MiscEnableMsr;
+
+ ASSERT(CpuIndex < mMaxNumberOfCpus);
//
// Save Cr2 because Page Fault exception in SMM may override its value
@@ -1082,20 +1154,6 @@ SmiRendezvous (
InitializeSpinLock (mSmmMpSyncData->CpuData[CpuIndex].Busy);
}
- //
- // Try to enable XD
- //
- XdDisableFlag = FALSE;
- if (mXdSupported) {
- MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
- if (MiscEnableMsr.Bits.XD == 1) {
- XdDisableFlag = TRUE;
- MiscEnableMsr.Bits.XD = 0;
- AsmWriteMsr64 (MSR_IA32_MISC_ENABLE, MiscEnableMsr.Uint64);
- }
- ActivateXd ();
- }
-
if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
ActivateSmmProfile (CpuIndex);
}
@@ -1176,15 +1234,6 @@ SmiRendezvous (
//
while (*mSmmMpSyncData->AllCpusInSync) {
CpuPause ();
- }
-
- //
- // Restore XD
- //
- if (XdDisableFlag) {
- MiscEnableMsr.Uint64 = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
- MiscEnableMsr.Bits.XD = 1;
- AsmWriteMsr64 (MSR_IA32_MISC_ENABLE, MiscEnableMsr.Uint64);
}
}
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
index 852b5c7..d901348 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c
@@ -113,6 +113,19 @@ InitializeSmmIdt (
EFI_STATUS Status;
BOOLEAN InterruptState;
IA32_DESCRIPTOR DxeIdtr;
+
+ //
+ // There are 32 (not 255) entries in it since only processor
+ // generated exceptions will be handled.
+ //
+ gcSmiIdtr.Limit = (sizeof(IA32_IDT_GATE_DESCRIPTOR) * 32) - 1;
+ //
+ // Allocate page aligned IDT, because it might be set as read only.
+ //
+ gcSmiIdtr.Base = (UINTN)AllocateCodePages (EFI_SIZE_TO_PAGES(gcSmiIdtr.Limit + 1));
+ ASSERT (gcSmiIdtr.Base != 0);
+ ZeroMem ((VOID *)gcSmiIdtr.Base, gcSmiIdtr.Limit + 1);
+
//
// Disable Interrupt and save DXE IDT table
//
@@ -731,9 +744,9 @@ PiCpuSmmEntry (
//
BufferPages = EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1));
if ((FamilyId == 4) || (FamilyId == 5)) {
- Buffer = AllocateAlignedPages (BufferPages, SIZE_32KB);
+ Buffer = AllocateAlignedCodePages (BufferPages, SIZE_32KB);
} else {
- Buffer = AllocateAlignedPages (BufferPages, SIZE_4KB);
+ Buffer = AllocateAlignedCodePages (BufferPages, SIZE_4KB);
}
ASSERT (Buffer != NULL);
DEBUG ((EFI_D_INFO, "SMRAM SaveState Buffer (0x%08x, 0x%08x)\n", Buffer, EFI_PAGES_TO_SIZE(BufferPages)));
@@ -842,6 +855,8 @@ PiCpuSmmEntry (
//
SmmCpuFeaturesSmmRelocationComplete ();
+ DEBUG ((DEBUG_INFO, "mXdSupported - 0x%x\n", mXdSupported));
+
//
// SMM Time initialization
//
@@ -1137,6 +1152,17 @@ ConfigSmmCodeAccessCheck (
}
/**
+ Set the code region to be read-only and the data region to be execute-disable.
+**/
+VOID
+SetRegionAttributes (
+ VOID
+ )
+{
+ SetMemMapAttributes ();
+}
+
+/**
This API provides a way to allocate memory for page table.
This API can be called more once to allocate memory for page tables.
@@ -1166,6 +1192,109 @@ AllocatePageTableMemory (
}
/**
+ Allocate pages for code.
+
+ @param[in] Pages Number of pages to be allocated.
+
+ @return Allocated memory.
+**/
+VOID *
+AllocateCodePages (
+ IN UINTN Pages
+ )
+{
+ EFI_STATUS Status;
+ EFI_PHYSICAL_ADDRESS Memory;
+
+ if (Pages == 0) {
+ return NULL;
+ }
+
+ Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
+ if (EFI_ERROR (Status)) {
+ return NULL;
+ }
+ return (VOID *) (UINTN) Memory;
+}
+
+/**
+ Allocate aligned pages for code.
+
+ @param[in] Pages Number of pages to be allocated.
+ @param[in] Alignment The requested alignment of the allocation.
+ Must be a power of two.
+ If Alignment is zero, then byte alignment is used.
+
+ @return Allocated memory.
+**/
+VOID *
+AllocateAlignedCodePages (
+ IN UINTN Pages,
+ IN UINTN Alignment
+ )
+{
+ EFI_STATUS Status;
+ EFI_PHYSICAL_ADDRESS Memory;
+ UINTN AlignedMemory;
+ UINTN AlignmentMask;
+ UINTN UnalignedPages;
+ UINTN RealPages;
+
+ //
+ // Alignment must be a power of two or zero.
+ //
+ ASSERT ((Alignment & (Alignment - 1)) == 0);
+
+ if (Pages == 0) {
+ return NULL;
+ }
+ if (Alignment > EFI_PAGE_SIZE) {
+ //
+ // Calculate the total number of pages since alignment is larger than page size.
+ //
+ AlignmentMask = Alignment - 1;
+ RealPages = Pages + EFI_SIZE_TO_PAGES (Alignment);
+ //
+ // Make sure that Pages plus EFI_SIZE_TO_PAGES (Alignment) does not overflow.
+ //
+ ASSERT (RealPages > Pages);
+
+ Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, RealPages, &Memory);
+ if (EFI_ERROR (Status)) {
+ return NULL;
+ }
+ AlignedMemory = ((UINTN) Memory + AlignmentMask) & ~AlignmentMask;
+ UnalignedPages = EFI_SIZE_TO_PAGES (AlignedMemory - (UINTN) Memory);
+ if (UnalignedPages > 0) {
+ //
+ // Free first unaligned page(s).
+ //
+ Status = gSmst->SmmFreePages (Memory, UnalignedPages);
+ ASSERT_EFI_ERROR (Status);
+ }
+ Memory = (EFI_PHYSICAL_ADDRESS) (AlignedMemory + EFI_PAGES_TO_SIZE (Pages));
+ UnalignedPages = RealPages - Pages - UnalignedPages;
+ if (UnalignedPages > 0) {
+ //
+ // Free last unaligned page(s).
+ //
+ Status = gSmst->SmmFreePages (Memory, UnalignedPages);
+ ASSERT_EFI_ERROR (Status);
+ }
+ } else {
+ //
+ // Do not over-allocate pages in this case.
+ //
+ Status = gSmst->SmmAllocatePages (AllocateAnyPages, EfiRuntimeServicesCode, Pages, &Memory);
+ if (EFI_ERROR (Status)) {
+ return NULL;
+ }
+ AlignedMemory = (UINTN) Memory;
+ }
+ return (VOID *) AlignedMemory;
+}
+
+/**
Perform the remaining tasks.
**/
@@ -1185,6 +1314,17 @@ PerformRemainingTasks (
// Create a mix of 2MB and 4KB page table. Update some memory ranges absent and execute-disable.
//
InitPaging ();
+
+ //
+ // Mark critical region to be read-only in page table
+ //
+ SetRegionAttributes ();
+
+ //
+ // Set page table itself to be read-only
+ //
+ SetPageTableAttributes ();
+
//
// Configure SMM Code Access Check feature if available.
//
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
index 9b119c8..6a1582b 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h
@@ -25,6 +25,7 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#include <Protocol/SmmCpuService.h>
#include <Guid/AcpiS3Context.h>
+#include <Guid/PiSmmMemoryAttributesTable.h>
#include <Library/BaseLib.h>
#include <Library/IoLib.h>
@@ -83,13 +84,38 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#define IA32_PG_PMNT BIT62
#define IA32_PG_NX BIT63
-#define PAGE_ATTRIBUTE_BITS (IA32_PG_RW | IA32_PG_P)
+#define PAGE_ATTRIBUTE_BITS (IA32_PG_D | IA32_PG_A | IA32_PG_U | IA32_PG_RW | IA32_PG_P)
//
// Bits 1, 2, 5, 6 are reserved in the IA32 PAE PDPTE
// X64 PAE PDPTE does not have such restriction
//
#define IA32_PAE_PDPTE_ATTRIBUTE_BITS (IA32_PG_P)
+#define PAGE_PROGATE_BITS (IA32_PG_NX | PAGE_ATTRIBUTE_BITS)
+
+#define PAGING_4K_MASK 0xFFF
+#define PAGING_2M_MASK 0x1FFFFF
+#define PAGING_1G_MASK 0x3FFFFFFF
+
+#define PAGING_PAE_INDEX_MASK 0x1FF
+
+#define PAGING_4K_ADDRESS_MASK_64 0x000FFFFFFFFFF000ull
+#define PAGING_2M_ADDRESS_MASK_64 0x000FFFFFFFE00000ull
+#define PAGING_1G_ADDRESS_MASK_64 0x000FFFFFC0000000ull
+
+typedef enum {
+ PageNone,
+ Page4K,
+ Page2M,
+ Page1G,
+} PAGE_ATTRIBUTE;
+
+typedef struct {
+ PAGE_ATTRIBUTE Attribute;
+ UINT64 Length;
+ UINT64 AddressMask;
+} PAGE_ATTRIBUTE_TABLE;
+
//
// Size of Task-State Segment defined in IA32 Manual
//
@@ -98,6 +124,8 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#define TSS_IA32_CR3_OFFSET 28
#define TSS_IA32_ESP_OFFSET 56
+#define CR0_WP BIT16
+
//
// Code select value
//
@@ -395,6 +423,8 @@ typedef struct {
} SMM_CPU_SEMAPHORES;
extern IA32_DESCRIPTOR gcSmiGdtr;
+extern EFI_PHYSICAL_ADDRESS mGdtBuffer;
+extern UINTN mGdtBufferSize;
extern IA32_DESCRIPTOR gcSmiIdtr;
extern VOID *gcSmiIdtrPtr;
extern CONST PROCESSOR_SMM_DESCRIPTOR gcPsd;
@@ -414,14 +444,12 @@ extern SPIN_LOCK *mMemoryMappedLock;
/**
Create 4G PageTable in SMRAM.
- @param ExtraPages Additional page numbers besides for 4G memory
- @param Is32BitPageTable Whether the page table is 32-bit PAE
+ @param[in] Is32BitPageTable Whether the page table is 32-bit PAE
@return PageTable Address
**/
UINT32
Gen4GPageTable (
- IN UINTN ExtraPages,
IN BOOLEAN Is32BitPageTable
);
@@ -482,7 +510,7 @@ InitializeIDTSmmStackGuard (
/**
Initialize Gdt for all processors.
-
+
@param[in] Cr3 CR3 value.
@param[out] GdtStepSize The step size for GDT table.
@@ -761,6 +789,96 @@ DumpModuleInfoByIp (
);
/**
+ This function sets memory attribute according to MemoryAttributesTable.
+**/
+VOID
+SetMemMapAttributes (
+ VOID
+ );
+
+/**
+ This function sets memory attribute for page table.
+**/
+VOID
+SetPageTableAttributes (
+ VOID
+ );
+
+/**
+ Return page table base.
+
+ @return page table base.
+**/
+UINTN
+GetPageTableBase (
+ VOID
+ );
+
+/**
+ This function sets the attributes for the memory region specified by BaseAddress and
+ Length from their current attributes to the attributes specified by Attributes.
+
+ @param[in] BaseAddress The physical address that is the start address of a memory region.
+ @param[in] Length The size in bytes of the memory region.
+ @param[in] Attributes The bit mask of attributes to set for the memory region.
+ @param[out] IsSplitted TRUE means the page table was split. FALSE means the page table was not split.
+
+ @retval EFI_SUCCESS The attributes were set for the memory region.
+ @retval EFI_ACCESS_DENIED The attributes for the memory resource range specified by
+ BaseAddress and Length cannot be modified.
+ @retval EFI_INVALID_PARAMETER Length is zero.
+ Attributes specified an illegal combination of attributes that
+ cannot be set together.
+ @retval EFI_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
+ the memory resource range.
+ @retval EFI_UNSUPPORTED The processor does not support one or more bytes of the memory
+ resource range specified by BaseAddress and Length.
+ The bit mask of attributes is not supported for the memory resource
+ range specified by BaseAddress and Length.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmSetMemoryAttributesEx (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes,
+ OUT BOOLEAN *IsSplitted OPTIONAL
+ );
+
+/**
+ This function clears the attributes for the memory region specified by BaseAddress and
+ Length from their current attributes to the attributes specified by Attributes.
+
+ @param[in] BaseAddress The physical address that is the start address of a memory region.
+ @param[in] Length The size in bytes of the memory region.
+ @param[in] Attributes The bit mask of attributes to clear for the memory region.
+ @param[out] IsSplitted TRUE means the page table was split. FALSE means the page table was not split.
+
+ @retval EFI_SUCCESS The attributes were cleared for the memory region.
+ @retval EFI_ACCESS_DENIED The attributes for the memory resource range specified by
+ BaseAddress and Length cannot be modified.
+ @retval EFI_INVALID_PARAMETER Length is zero.
+ Attributes specified an illegal combination of attributes that
+ cannot be set together.
+ @retval EFI_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
+ the memory resource range.
+ @retval EFI_UNSUPPORTED The processor does not support one or more bytes of the memory
+ resource range specified by BaseAddress and Length.
+ The bit mask of attributes is not supported for the memory resource
+ range specified by BaseAddress and Length.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmClearMemoryAttributesEx (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes,
+ OUT BOOLEAN *IsSplitted OPTIONAL
+ );
+
+/**
This API provides a way to allocate memory for page table.
This API can be called more once to allocate memory for page tables.
@@ -780,6 +898,34 @@ AllocatePageTableMemory (
IN UINTN Pages
);
+/**
+ Allocate pages for code.
+
+ @param[in] Pages Number of pages to be allocated.
+
+ @return Allocated memory.
+**/
+VOID *
+AllocateCodePages (
+ IN UINTN Pages
+ );
+
+/**
+ Allocate aligned pages for code.
+
+ @param[in] Pages Number of pages to be allocated.
+ @param[in] Alignment The requested alignment of the allocation.
+ Must be a power of two.
+ If Alignment is zero, then byte alignment is used.
+
+ @return Allocated memory.
+**/
+VOID *
+AllocateAlignedCodePages (
+ IN UINTN Pages,
+ IN UINTN Alignment
+ );
+
//
// S3 related global variable and function prototype.
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
index 5d598d6..d409edf 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
@@ -4,7 +4,7 @@
# This SMM driver performs SMM initialization, deploy SMM Entry Vector,
# provides CPU specific services in SMM.
#
-# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
@@ -44,6 +44,7 @@
SmmProfile.h
SmmProfileInternal.h
SmramSaveState.c
+ SmmCpuMemoryManagement.c
[Sources.Ia32]
Ia32/Semaphore.c
@@ -133,6 +134,7 @@
gEfiGlobalVariableGuid ## SOMETIMES_PRODUCES ## Variable:L"SmmProfileData"
gEfiAcpi20TableGuid ## SOMETIMES_CONSUMES ## SystemTable
gEfiAcpi10TableGuid ## SOMETIMES_CONSUMES ## SystemTable
+ gEdkiiPiSmmMemoryAttributesTableGuid ## CONSUMES ## SystemTable
[FeaturePcd]
gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmDebug ## CONSUMES
@@ -153,6 +155,7 @@
gUefiCpuPkgTokenSpaceGuid.PcdCpuHotPlugDataAddress ## SOMETIMES_PRODUCES
gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmCodeAccessCheckEnable ## CONSUMES
gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmSyncMode ## CONSUMES
+ gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStaticPageTable ## CONSUMES
gEfiMdeModulePkgTokenSpaceGuid.PcdAcpiS3Enable ## CONSUMES
[Depex]
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
new file mode 100644
index 0000000..710287c
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmCpuMemoryManagement.c
@@ -0,0 +1,871 @@
+/** @file
+
+Copyright (c) 2016, Intel Corporation. All rights reserved.<BR>
+This program and the accompanying materials
+are licensed and made available under the terms and conditions of the BSD License
+which accompanies this distribution. The full text of the license may be found at
+http://opensource.org/licenses/bsd-license.php
+
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+#define NEXT_MEMORY_DESCRIPTOR(MemoryDescriptor, Size) \
+ ((EFI_MEMORY_DESCRIPTOR *)((UINT8 *)(MemoryDescriptor) + (Size)))
+
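+//
+// Lookup table that maps each page size attribute (4K/2M/1G) to its length and
+// physical address mask, used when walking and splitting page table entries.
+//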
+PAGE_ATTRIBUTE_TABLE mPageAttributeTable[] = {
+ {Page4K, SIZE_4KB, PAGING_4K_ADDRESS_MASK_64},
+ {Page2M, SIZE_2MB, PAGING_2M_ADDRESS_MASK_64},
+ {Page1G, SIZE_1GB, PAGING_1G_ADDRESS_MASK_64},
+};
+
+/**
+ Return page table base.
+
+ @return page table base.
+**/
+UINTN
+GetPageTableBase (
+ VOID
+ )
+{
+ return (AsmReadCr3 () & PAGING_4K_ADDRESS_MASK_64);
+}
+
+/**
+ Return length according to page attributes.
+
+ @param[in] PageAttributes The page attribute of the page entry.
+
+ @return The length of page entry.
+**/
+UINTN
+PageAttributeToLength (
+ IN PAGE_ATTRIBUTE PageAttribute
+ )
+{
+ UINTN Index;
+ for (Index = 0; Index < sizeof(mPageAttributeTable)/sizeof(mPageAttributeTable[0]); Index++) {
+ if (PageAttribute == mPageAttributeTable[Index].Attribute) {
+ return (UINTN)mPageAttributeTable[Index].Length;
+ }
+ }
+ return 0;
+}
+
+/**
+ Return address mask according to page attributes.
+
+ @param[in] PageAttributes The page attribute of the page entry.
+
+ @return The address mask of page entry.
+**/
+UINTN
+PageAttributeToMask (
+ IN PAGE_ATTRIBUTE PageAttribute
+ )
+{
+ UINTN Index;
+ for (Index = 0; Index < sizeof(mPageAttributeTable)/sizeof(mPageAttributeTable[0]); Index++) {
+ if (PageAttribute == mPageAttributeTable[Index].Attribute) {
+ return (UINTN)mPageAttributeTable[Index].AddressMask;
+ }
+ }
+ return 0;
+}
+
+/**
+ Return page table entry to match the address.
+
+ @param[in] Address The address to be checked.
+ @param[out] PageAttributes The page attribute of the page entry.
+
+ @return The page entry.
+**/
+VOID *
+GetPageTableEntry (
+ IN PHYSICAL_ADDRESS Address,
+ OUT PAGE_ATTRIBUTE *PageAttribute
+ )
+{
+ UINTN Index1;
+ UINTN Index2;
+ UINTN Index3;
+ UINTN Index4;
+ UINT64 *L1PageTable;
+ UINT64 *L2PageTable;
+ UINT64 *L3PageTable;
+ UINT64 *L4PageTable;
+
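+  //
+  // Derive the PML4/PDPT/PD/PT indexes from the linear address.
+  //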
+ Index4 = ((UINTN)RShiftU64 (Address, 39)) & PAGING_PAE_INDEX_MASK;
+ Index3 = ((UINTN)Address >> 30) & PAGING_PAE_INDEX_MASK;
+ Index2 = ((UINTN)Address >> 21) & PAGING_PAE_INDEX_MASK;
+ Index1 = ((UINTN)Address >> 12) & PAGING_PAE_INDEX_MASK;
+
+ if (sizeof(UINTN) == sizeof(UINT64)) {
+ L4PageTable = (UINT64 *)GetPageTableBase ();
+ if (L4PageTable[Index4] == 0) {
+ *PageAttribute = PageNone;
+ return NULL;
+ }
+
+ L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & PAGING_4K_ADDRESS_MASK_64);
+ } else {
+ L3PageTable = (UINT64 *)GetPageTableBase ();
+ }
+ if (L3PageTable[Index3] == 0) {
+ *PageAttribute = PageNone;
+ return NULL;
+ }
+ if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
+ // 1G
+ *PageAttribute = Page1G;
+ return &L3PageTable[Index3];
+ }
+
+ L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & PAGING_4K_ADDRESS_MASK_64);
+ if (L2PageTable[Index2] == 0) {
+ *PageAttribute = PageNone;
+ return NULL;
+ }
+ if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
+ // 2M
+ *PageAttribute = Page2M;
+ return &L2PageTable[Index2];
+ }
+
+ // 4k
+ L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & PAGING_4K_ADDRESS_MASK_64);
+ if ((L1PageTable[Index1] == 0) && (Address != 0)) {
+ *PageAttribute = PageNone;
+ return NULL;
+ }
+ *PageAttribute = Page4K;
+ return &L1PageTable[Index1];
+}
+
+/**
+ Return memory attributes of page entry.
+
+ @param[in] PageEntry The page entry.
+
+ @return Memory attributes of page entry.
+**/
+UINT64
+GetAttributesFromPageEntry (
+ IN UINT64 *PageEntry
+ )
+{
+ UINT64 Attributes;
+ Attributes = 0;
+ if ((*PageEntry & IA32_PG_P) == 0) {
+ Attributes |= EFI_MEMORY_RP;
+ }
+ if ((*PageEntry & IA32_PG_RW) == 0) {
+ Attributes |= EFI_MEMORY_RO;
+ }
+ if ((*PageEntry & IA32_PG_NX) != 0) {
+ Attributes |= EFI_MEMORY_XP;
+ }
+ return Attributes;
+}
+
+/**
+ Modify memory attributes of page entry.
+
+ @param[in] PageEntry The page entry.
+ @param[in] Attributes The bit mask of attributes to modify for the memory region.
+ @param[in] IsSet TRUE means to set attributes. FALSE means to clear attributes.
+ @param[out] IsModified TRUE means page table modified. FALSE means page table not modified.
+**/
+VOID
+ConvertPageEntryAttribute (
+ IN UINT64 *PageEntry,
+ IN UINT64 Attributes,
+ IN BOOLEAN IsSet,
+ OUT BOOLEAN *IsModified
+ )
+{
+ UINT64 CurrentPageEntry;
+ UINT64 NewPageEntry;
+
+ CurrentPageEntry = *PageEntry;
+ NewPageEntry = CurrentPageEntry;
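+  //
+  // EFI_MEMORY_RP maps to the Present bit, EFI_MEMORY_RO to the Read/Write bit,
+  // and EFI_MEMORY_XP to the NX bit.
+  //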
+ if ((Attributes & EFI_MEMORY_RP) != 0) {
+ if (IsSet) {
+ NewPageEntry &= ~(UINT64)IA32_PG_P;
+ } else {
+ NewPageEntry |= IA32_PG_P;
+ }
+ }
+ if ((Attributes & EFI_MEMORY_RO) != 0) {
+ if (IsSet) {
+ NewPageEntry &= ~(UINT64)IA32_PG_RW;
+ } else {
+ NewPageEntry |= IA32_PG_RW;
+ }
+ }
+ if ((Attributes & EFI_MEMORY_XP) != 0) {
+ if (IsSet) {
+ NewPageEntry |= IA32_PG_NX;
+ } else {
+ NewPageEntry &= ~IA32_PG_NX;
+ }
+ }
+ *PageEntry = NewPageEntry;
+ if (CurrentPageEntry != NewPageEntry) {
+ *IsModified = TRUE;
+ DEBUG ((DEBUG_VERBOSE, "ConvertPageEntryAttribute 0x%lx", CurrentPageEntry));
+ DEBUG ((DEBUG_VERBOSE, "->0x%lx\n", NewPageEntry));
+ } else {
+ *IsModified = FALSE;
+ }
+}
+
+/**
+ This function returns whether the page entry needs to be split.
+
+ @param[in] BaseAddress The base address to be checked.
+ @param[in] Length The length to be checked.
+ @param[in] PageEntry The page entry to be checked.
+ @param[in] PageAttribute The page attribute of the page entry.
+
+ @retval SplitAttribute The page attribute to split the page entry into, or PageNone if no split is needed.
+**/
+PAGE_ATTRIBUTE
+NeedSplitPage (
+ IN PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 *PageEntry,
+ IN PAGE_ATTRIBUTE PageAttribute
+ )
+{
+ UINT64 PageEntryLength;
+
+ PageEntryLength = PageAttributeToLength (PageAttribute);
+
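+  //
+  // No split is needed if the range is aligned to this entry and covers it entirely.
+  //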
+ if (((BaseAddress & (PageEntryLength - 1)) == 0) && (Length >= PageEntryLength)) {
+ return PageNone;
+ }
+
+ if (((BaseAddress & PAGING_2M_MASK) != 0) || (Length < SIZE_2MB)) {
+ return Page4K;
+ }
+
+ return Page2M;
+}
+
+/**
+ This function splits one page entry into smaller page entries.
+
+ @param[in] PageEntry The page entry to be split.
+ @param[in] PageAttribute The page attribute of the page entry.
+ @param[in] SplitAttribute How to split the page entry.
+
+ @retval RETURN_SUCCESS The page entry was split.
+ @retval RETURN_UNSUPPORTED The page entry does not support being split.
+ @retval RETURN_OUT_OF_RESOURCES No resource to split the page entry.
+**/
+RETURN_STATUS
+SplitPage (
+ IN UINT64 *PageEntry,
+ IN PAGE_ATTRIBUTE PageAttribute,
+ IN PAGE_ATTRIBUTE SplitAttribute
+ )
+{
+ UINT64 BaseAddress;
+ UINT64 *NewPageEntry;
+ UINTN Index;
+
+ ASSERT (PageAttribute == Page2M || PageAttribute == Page1G);
+
+ if (PageAttribute == Page2M) {
+ //
+ // Split 2M to 4K
+ //
+ ASSERT (SplitAttribute == Page4K);
+ if (SplitAttribute == Page4K) {
+ NewPageEntry = AllocatePageTableMemory (1);
+ DEBUG ((DEBUG_INFO, "Split - 0x%x\n", NewPageEntry));
+ if (NewPageEntry == NULL) {
+ return RETURN_OUT_OF_RESOURCES;
+ }
+ BaseAddress = *PageEntry & PAGING_2M_ADDRESS_MASK_64;
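+      //
+      // Each new 4K entry inherits the attribute bits of the original 2M entry.
+      //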
+ for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
+ NewPageEntry[Index] = BaseAddress + SIZE_4KB * Index + ((*PageEntry) & PAGE_PROGATE_BITS);
+ }
+ (*PageEntry) = (UINT64)(UINTN)NewPageEntry + ((*PageEntry) & PAGE_PROGATE_BITS);
+ return RETURN_SUCCESS;
+ } else {
+ return RETURN_UNSUPPORTED;
+ }
+ } else if (PageAttribute == Page1G) {
+ //
+ // Split 1G to 2M
+ // There is no need to support 1G->4K directly; use 1G->2M, then 2M->4K, to get a more compact page table.
+ //
+ ASSERT (SplitAttribute == Page2M || SplitAttribute == Page4K);
+ if ((SplitAttribute == Page2M || SplitAttribute == Page4K)) {
+ NewPageEntry = AllocatePageTableMemory (1);
+ DEBUG ((DEBUG_INFO, "Split - 0x%x\n", NewPageEntry));
+ if (NewPageEntry == NULL) {
+ return RETURN_OUT_OF_RESOURCES;
+ }
+ BaseAddress = *PageEntry & PAGING_1G_ADDRESS_MASK_64;
+ for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
+ NewPageEntry[Index] = BaseAddress + SIZE_2MB * Index + IA32_PG_PS + ((*PageEntry) & PAGE_PROGATE_BITS);
+ }
+ (*PageEntry) = (UINT64)(UINTN)NewPageEntry + ((*PageEntry) & PAGE_PROGATE_BITS);
+ return RETURN_SUCCESS;
+ } else {
+ return RETURN_UNSUPPORTED;
+ }
+ } else {
+ return RETURN_UNSUPPORTED;
+ }
+}
+
+/**
+ This function modifies the page attributes for the memory region specified by BaseAddress and
+ Length from their current attributes to the attributes specified by Attributes.
+
+ The caller should make sure BaseAddress and Length are page aligned.
+
+ @param[in] BaseAddress The physical address that is the start address of a memory region.
+ @param[in] Length The size in bytes of the memory region.
+ @param[in] Attributes The bit mask of attributes to modify for the memory region.
+ @param[in] IsSet TRUE means to set attributes. FALSE means to clear attributes.
+ @param[out] IsSplitted TRUE means the page table was split. FALSE means the page table was not split.
+ @param[out] IsModified TRUE means page table modified. FALSE means page table not modified.
+
+ @retval RETURN_SUCCESS The attributes were modified for the memory region.
+ @retval RETURN_ACCESS_DENIED The attributes for the memory resource range specified by
+ BaseAddress and Length cannot be modified.
+ @retval RETURN_INVALID_PARAMETER Length is zero.
+ Attributes specified an illegal combination of attributes that
+ cannot be set together.
+ @retval RETURN_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
+ the memory resource range.
+ @retval RETURN_UNSUPPORTED The processor does not support one or more bytes of the memory
+ resource range specified by BaseAddress and Length.
+ The bit mask of attributes is not supported for the memory resource
+ range specified by BaseAddress and Length.
+**/
+RETURN_STATUS
+EFIAPI
+ConvertMemoryPageAttributes (
+ IN PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes,
+ IN BOOLEAN IsSet,
+ OUT BOOLEAN *IsSplitted, OPTIONAL
+ OUT BOOLEAN *IsModified OPTIONAL
+ )
+{
+ UINT64 *PageEntry;
+ PAGE_ATTRIBUTE PageAttribute;
+ UINTN PageEntryLength;
+ PAGE_ATTRIBUTE SplitAttribute;
+ RETURN_STATUS Status;
+ BOOLEAN IsEntryModified;
+
+ ASSERT (Attributes != 0);
+ ASSERT ((Attributes & ~(EFI_MEMORY_RP | EFI_MEMORY_RO | EFI_MEMORY_XP)) == 0);
+
+ ASSERT ((BaseAddress & (SIZE_4KB - 1)) == 0);
+ ASSERT ((Length & (SIZE_4KB - 1)) == 0);
+
+ if (Length == 0) {
+ return RETURN_INVALID_PARAMETER;
+ }
+
+// DEBUG ((DEBUG_ERROR, "ConvertMemoryPageAttributes(%x) - %016lx, %016lx, %02lx\n", IsSet, BaseAddress, Length, Attributes));
+
+ if (IsSplitted != NULL) {
+ *IsSplitted = FALSE;
+ }
+ if (IsModified != NULL) {
+ *IsModified = FALSE;
+ }
+
+ //
+ // The logic below checks 2M/4K pages to make sure we do not waste memory.
+ //
+ while (Length != 0) {
+ PageEntry = GetPageTableEntry (BaseAddress, &PageAttribute);
+ if (PageEntry == NULL) {
+ return RETURN_UNSUPPORTED;
+ }
+ PageEntryLength = PageAttributeToLength (PageAttribute);
+ SplitAttribute = NeedSplitPage (BaseAddress, Length, PageEntry, PageAttribute);
+ if (SplitAttribute == PageNone) {
+ ConvertPageEntryAttribute (PageEntry, Attributes, IsSet, &IsEntryModified);
+ if (IsEntryModified) {
+ if (IsModified != NULL) {
+ *IsModified = TRUE;
+ }
+ }
+ //
+ // Convert success, move to next
+ //
+ BaseAddress += PageEntryLength;
+ Length -= PageEntryLength;
+ } else {
+ Status = SplitPage (PageEntry, PageAttribute, SplitAttribute);
+ if (RETURN_ERROR (Status)) {
+ return RETURN_UNSUPPORTED;
+ }
+ if (IsSplitted != NULL) {
+ *IsSplitted = TRUE;
+ }
+ if (IsModified != NULL) {
+ *IsModified = TRUE;
+ }
+ //
+ // Just split the current page.
+ // The conversion will succeed in the next round of the loop.
+ //
+ }
+ }
+
+ return RETURN_SUCCESS;
+}
+
+/**
+ FlushTlb on current processor.
+
+ @param[in,out] Buffer Pointer to private data buffer.
+**/
+VOID
+EFIAPI
+FlushTlbOnCurrentProcessor (
+ IN OUT VOID *Buffer
+ )
+{
+ CpuFlushTlb ();
+}
+
+/**
+ FlushTlb for all processors.
+**/
+VOID
+FlushTlbForAll (
+ VOID
+ )
+{
+ UINTN Index;
+
+ FlushTlbOnCurrentProcessor (NULL);
+
+ for (Index = 0; Index < gSmst->NumberOfCpus; Index++) {
+ if (Index != gSmst->CurrentlyExecutingCpu) {
+ //
+ // Start up the AP in blocking mode. The return status is not checked,
+ // because the AP might not be present in some corner cases.
+ //
+ SmmBlockingStartupThisAp (FlushTlbOnCurrentProcessor, Index, NULL);
+ }
+ }
+}
+
+/**
+ This function sets the attributes for the memory region specified by BaseAddress and
+ Length from their current attributes to the attributes specified by Attributes.
+
+ @param[in] BaseAddress The physical address that is the start address of a memory region.
+ @param[in] Length The size in bytes of the memory region.
+ @param[in] Attributes The bit mask of attributes to set for the memory region.
+ @param[out] IsSplitted TRUE means the page table was split. FALSE means the page table was not split.
+
+ @retval EFI_SUCCESS The attributes were set for the memory region.
+ @retval EFI_ACCESS_DENIED The attributes for the memory resource range specified by
+ BaseAddress and Length cannot be modified.
+ @retval EFI_INVALID_PARAMETER Length is zero.
+ Attributes specified an illegal combination of attributes that
+ cannot be set together.
+ @retval EFI_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
+ the memory resource range.
+ @retval EFI_UNSUPPORTED The processor does not support one or more bytes of the memory
+ resource range specified by BaseAddress and Length.
+ The bit mask of attributes is not supported for the memory resource
+ range specified by BaseAddress and Length.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmSetMemoryAttributesEx (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes,
+ OUT BOOLEAN *IsSplitted OPTIONAL
+ )
+{
+ EFI_STATUS Status;
+ BOOLEAN IsModified;
+
+ Status = ConvertMemoryPageAttributes (BaseAddress, Length, Attributes, TRUE, IsSplitted, &IsModified);
+ if (!EFI_ERROR(Status)) {
+ if (IsModified) {
+ //
+ // Flush TLB as last step
+ //
+ FlushTlbForAll();
+ }
+ }
+
+ return Status;
+}
+
+/**
+ This function clears the attributes for the memory region specified by BaseAddress and
+ Length from their current attributes to the attributes specified by Attributes.
+
+ @param[in] BaseAddress The physical address that is the start address of a memory region.
+ @param[in] Length The size in bytes of the memory region.
+ @param[in] Attributes The bit mask of attributes to clear for the memory region.
+ @param[out] IsSplitted TRUE means the page table was split. FALSE means the page table was not split.
+
+ @retval EFI_SUCCESS The attributes were cleared for the memory region.
+ @retval EFI_ACCESS_DENIED The attributes for the memory resource range specified by
+ BaseAddress and Length cannot be modified.
+ @retval EFI_INVALID_PARAMETER Length is zero.
+ Attributes specified an illegal combination of attributes that
+ cannot be set together.
+ @retval EFI_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
+ the memory resource range.
+ @retval EFI_UNSUPPORTED The processor does not support one or more bytes of the memory
+ resource range specified by BaseAddress and Length.
+ The bit mask of attributes is not supported for the memory resource
+ range specified by BaseAddress and Length.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmClearMemoryAttributesEx (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes,
+ OUT BOOLEAN *IsSplitted OPTIONAL
+ )
+{
+ EFI_STATUS Status;
+ BOOLEAN IsModified;
+
+ Status = ConvertMemoryPageAttributes (BaseAddress, Length, Attributes, FALSE, IsSplitted, &IsModified);
+ if (!EFI_ERROR(Status)) {
+ if (IsModified) {
+ //
+ // Flush TLB as last step
+ //
+ FlushTlbForAll();
+ }
+ }
+
+ return Status;
+}
+
+/**
+ This function sets the attributes for the memory region specified by BaseAddress and
+ Length from their current attributes to the attributes specified by Attributes.
+
+ @param[in] BaseAddress The physical address that is the start address of a memory region.
+ @param[in] Length The size in bytes of the memory region.
+ @param[in] Attributes The bit mask of attributes to set for the memory region.
+
+ @retval EFI_SUCCESS The attributes were set for the memory region.
+ @retval EFI_ACCESS_DENIED The attributes for the memory resource range specified by
+ BaseAddress and Length cannot be modified.
+ @retval EFI_INVALID_PARAMETER Length is zero.
+ Attributes specified an illegal combination of attributes that
+ cannot be set together.
+ @retval EFI_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
+ the memory resource range.
+ @retval EFI_UNSUPPORTED The processor does not support one or more bytes of the memory
+ resource range specified by BaseAddress and Length.
+ The bit mask of attributes is not supported for the memory resource
+ range specified by BaseAddress and Length.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmSetMemoryAttributes (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes
+ )
+{
+ return SmmSetMemoryAttributesEx (BaseAddress, Length, Attributes, NULL);
+}
+
+/**
+ This function clears the attributes for the memory region specified by BaseAddress and
+ Length from their current attributes to the attributes specified by Attributes.
+
+ @param[in] BaseAddress The physical address that is the start address of a memory region.
+ @param[in] Length The size in bytes of the memory region.
+ @param[in] Attributes The bit mask of attributes to clear for the memory region.
+
+ @retval EFI_SUCCESS The attributes were cleared for the memory region.
+ @retval EFI_ACCESS_DENIED The attributes for the memory resource range specified by
+ BaseAddress and Length cannot be modified.
+ @retval EFI_INVALID_PARAMETER Length is zero.
+ Attributes specified an illegal combination of attributes that
+ cannot be set together.
+ @retval EFI_OUT_OF_RESOURCES There are not enough system resources to modify the attributes of
+ the memory resource range.
+ @retval EFI_UNSUPPORTED The processor does not support one or more bytes of the memory
+ resource range specified by BaseAddress and Length.
+ The bit mask of attributes is not supported for the memory resource
+ range specified by BaseAddress and Length.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmClearMemoryAttributes (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes
+ )
+{
+ return SmmClearMemoryAttributesEx (BaseAddress, Length, Attributes, NULL);
+}
+
+
+
+/**
+ Retrieves a pointer to the system configuration table from the SMM System Table
+ based on a specified GUID.
+
+ @param[in] TableGuid The pointer to table's GUID type.
+ @param[out] Table The pointer to the table associated with TableGuid in the EFI System Table.
+
+ @retval EFI_SUCCESS A configuration table matching TableGuid was found.
+ @retval EFI_NOT_FOUND A configuration table matching TableGuid could not be found.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmGetSystemConfigurationTable (
+ IN EFI_GUID *TableGuid,
+ OUT VOID **Table
+ )
+{
+ UINTN Index;
+
+ ASSERT (TableGuid != NULL);
+ ASSERT (Table != NULL);
+
+ *Table = NULL;
+ for (Index = 0; Index < gSmst->NumberOfTableEntries; Index++) {
+ if (CompareGuid (TableGuid, &(gSmst->SmmConfigurationTable[Index].VendorGuid))) {
+ *Table = gSmst->SmmConfigurationTable[Index].VendorTable;
+ return EFI_SUCCESS;
+ }
+ }
+
+ return EFI_NOT_FOUND;
+}
+
+/**
+ This function sets SMM save state buffer to be RW and XP.
+**/
+VOID
+PatchSmmSaveStateMap (
+ VOID
+ )
+{
+ UINTN Index;
+ UINTN TileCodeSize;
+ UINTN TileDataSize;
+ UINTN TileSize;
+
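+  //
+  // Compute the per-CPU tile layout: the SMI handler code and the save state/PSD data
+  // are each aligned to 4KB, and the whole tile is rounded up to a power of two.
+  //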
+ TileCodeSize = GetSmiHandlerSize ();
+ TileCodeSize = ALIGN_VALUE(TileCodeSize, SIZE_4KB);
+ TileDataSize = sizeof (SMRAM_SAVE_STATE_MAP) + sizeof (PROCESSOR_SMM_DESCRIPTOR);
+ TileDataSize = ALIGN_VALUE(TileDataSize, SIZE_4KB);
+ TileSize = TileDataSize + TileCodeSize - 1;
+ TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize);
+
+ DEBUG ((DEBUG_INFO, "PatchSmmSaveStateMap:\n"));
+ for (Index = 0; Index < mMaxNumberOfCpus - 1; Index++) {
+ //
+ // Code
+ //
+ SmmSetMemoryAttributes (
+ mCpuHotPlugData.SmBase[Index] + SMM_HANDLER_OFFSET,
+ TileCodeSize,
+ EFI_MEMORY_RO
+ );
+ SmmClearMemoryAttributes (
+ mCpuHotPlugData.SmBase[Index] + SMM_HANDLER_OFFSET,
+ TileCodeSize,
+ EFI_MEMORY_XP
+ );
+
+ //
+ // Data
+ //
+ SmmClearMemoryAttributes (
+ mCpuHotPlugData.SmBase[Index] + SMM_HANDLER_OFFSET + TileCodeSize,
+ TileSize - TileCodeSize,
+ EFI_MEMORY_RO
+ );
+ SmmSetMemoryAttributes (
+ mCpuHotPlugData.SmBase[Index] + SMM_HANDLER_OFFSET + TileCodeSize,
+ TileSize - TileCodeSize,
+ EFI_MEMORY_XP
+ );
+ }
+
+ //
+ // Code
+ //
+ SmmSetMemoryAttributes (
+ mCpuHotPlugData.SmBase[mMaxNumberOfCpus - 1] + SMM_HANDLER_OFFSET,
+ TileCodeSize,
+ EFI_MEMORY_RO
+ );
+ SmmClearMemoryAttributes (
+ mCpuHotPlugData.SmBase[mMaxNumberOfCpus - 1] + SMM_HANDLER_OFFSET,
+ TileCodeSize,
+ EFI_MEMORY_XP
+ );
+
+ //
+ // Data
+ //
+ SmmClearMemoryAttributes (
+ mCpuHotPlugData.SmBase[mMaxNumberOfCpus - 1] + SMM_HANDLER_OFFSET + TileCodeSize,
+ SIZE_32KB - TileCodeSize,
+ EFI_MEMORY_RO
+ );
+ SmmSetMemoryAttributes (
+ mCpuHotPlugData.SmBase[mMaxNumberOfCpus - 1] + SMM_HANDLER_OFFSET + TileCodeSize,
+ SIZE_32KB - TileCodeSize,
+ EFI_MEMORY_XP
+ );
+}
+
+/**
+ This function sets GDT/IDT buffer to be RO and XP.
+**/
+VOID
+PatchGdtIdtMap (
+ VOID
+ )
+{
+ EFI_PHYSICAL_ADDRESS BaseAddress;
+ UINTN Size;
+
+ //
+ // GDT
+ //
+ DEBUG ((DEBUG_INFO, "PatchGdtIdtMap - GDT:\n"));
+
+ BaseAddress = mGdtBuffer;
+ Size = ALIGN_VALUE(mGdtBufferSize, SIZE_4KB);
+ SmmSetMemoryAttributes (
+ BaseAddress,
+ Size,
+ EFI_MEMORY_RO
+ );
+ SmmSetMemoryAttributes (
+ BaseAddress,
+ Size,
+ EFI_MEMORY_XP
+ );
+
+ //
+ // IDT
+ //
+ DEBUG ((DEBUG_INFO, "PatchGdtIdtMap - IDT:\n"));
+
+ BaseAddress = gcSmiIdtr.Base;
+ Size = ALIGN_VALUE(gcSmiIdtr.Limit + 1, SIZE_4KB);
+ SmmSetMemoryAttributes (
+ BaseAddress,
+ Size,
+ EFI_MEMORY_RO
+ );
+ SmmSetMemoryAttributes (
+ BaseAddress,
+ Size,
+ EFI_MEMORY_XP
+ );
+}
+
+/**
+ This function sets memory attribute according to MemoryAttributesTable.
+**/
+VOID
+SetMemMapAttributes (
+ VOID
+ )
+{
+ EFI_MEMORY_DESCRIPTOR *MemoryMap;
+ EFI_MEMORY_DESCRIPTOR *MemoryMapStart;
+ UINTN MemoryMapEntryCount;
+ UINTN DescriptorSize;
+ UINTN Index;
+ EDKII_PI_SMM_MEMORY_ATTRIBUTES_TABLE *MemoryAttributesTable;
+
+ SmmGetSystemConfigurationTable (&gEdkiiPiSmmMemoryAttributesTableGuid, (VOID **)&MemoryAttributesTable);
+ if (MemoryAttributesTable == NULL) {
+ DEBUG ((DEBUG_INFO, "MemoryAttributesTable - NULL\n"));
+ return ;
+ }
+
+ DEBUG ((DEBUG_INFO, "MemoryAttributesTable:\n"));
+ DEBUG ((DEBUG_INFO, " Version - 0x%08x\n", MemoryAttributesTable->Version));
+ DEBUG ((DEBUG_INFO, " NumberOfEntries - 0x%08x\n", MemoryAttributesTable->NumberOfEntries));
+ DEBUG ((DEBUG_INFO, " DescriptorSize - 0x%08x\n", MemoryAttributesTable->DescriptorSize));
+
+ MemoryMapEntryCount = MemoryAttributesTable->NumberOfEntries;
+ DescriptorSize = MemoryAttributesTable->DescriptorSize;
+ MemoryMapStart = (EFI_MEMORY_DESCRIPTOR *)(MemoryAttributesTable + 1);
+ MemoryMap = MemoryMapStart;
+ for (Index = 0; Index < MemoryMapEntryCount; Index++) {
+ DEBUG ((DEBUG_INFO, "Entry (0x%x)\n", MemoryMap));
+ DEBUG ((DEBUG_INFO, " Type - 0x%x\n", MemoryMap->Type));
+ DEBUG ((DEBUG_INFO, " PhysicalStart - 0x%016lx\n", MemoryMap->PhysicalStart));
+ DEBUG ((DEBUG_INFO, " VirtualStart - 0x%016lx\n", MemoryMap->VirtualStart));
+ DEBUG ((DEBUG_INFO, " NumberOfPages - 0x%016lx\n", MemoryMap->NumberOfPages));
+ DEBUG ((DEBUG_INFO, " Attribute - 0x%016lx\n", MemoryMap->Attribute));
+ MemoryMap = NEXT_MEMORY_DESCRIPTOR(MemoryMap, DescriptorSize);
+ }
+
+ MemoryMap = MemoryMapStart;
+ for (Index = 0; Index < MemoryMapEntryCount; Index++) {
+ DEBUG ((DEBUG_INFO, "SetAttribute: Memory Entry - 0x%lx, 0x%x\n", MemoryMap->PhysicalStart, MemoryMap->NumberOfPages));
+ switch (MemoryMap->Type) {
+ case EfiRuntimeServicesCode:
+ SmmSetMemoryAttributes (
+ MemoryMap->PhysicalStart,
+ EFI_PAGES_TO_SIZE((UINTN)MemoryMap->NumberOfPages),
+ EFI_MEMORY_RO
+ );
+ break;
+ case EfiRuntimeServicesData:
+ SmmSetMemoryAttributes (
+ MemoryMap->PhysicalStart,
+ EFI_PAGES_TO_SIZE((UINTN)MemoryMap->NumberOfPages),
+ EFI_MEMORY_XP
+ );
+ break;
+ default:
+ SmmSetMemoryAttributes (
+ MemoryMap->PhysicalStart,
+ EFI_PAGES_TO_SIZE((UINTN)MemoryMap->NumberOfPages),
+ EFI_MEMORY_XP
+ );
+ break;
+ }
+ MemoryMap = NEXT_MEMORY_DESCRIPTOR(MemoryMap, DescriptorSize);
+ }
+
+ PatchSmmSaveStateMap ();
+ PatchGdtIdtMap ();
+
+ return ;
+}
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
index 329574e..4b7fad2 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c
@@ -30,11 +30,6 @@ UINTN mSmmProfileSize;
UINTN mMsrDsAreaSize = SMM_PROFILE_DTS_SIZE;
//
-// The flag indicates if execute-disable is supported by processor.
-//
-BOOLEAN mXdSupported = TRUE;
-
-//
// The flag indicates if execute-disable is enabled on processor.
//
BOOLEAN mXdEnabled = FALSE;
@@ -529,6 +524,12 @@ InitPaging (
//
continue;
}
+ if ((*Pde & IA32_PG_PS) != 0) {
+ //
+ // This is 1G entry, skip it
+ //
+ continue;
+ }
Pte = (UINT64 *)(UINTN)(*Pde & PHYSICAL_ADDRESS_MASK);
if (Pte == 0) {
continue;
@@ -587,6 +588,15 @@ InitPaging (
//
continue;
}
+ if ((*Pde & IA32_PG_PS) != 0) {
+ //
+ // This is 1G entry, set NX bit and skip it
+ //
+ if (mXdSupported) {
+ *Pde = *Pde | IA32_PG_NX;
+ }
+ continue;
+ }
Pte = (UINT64 *)(UINTN)(*Pde & PHYSICAL_ADDRESS_MASK);
if (Pte == 0) {
continue;
@@ -976,25 +986,6 @@ CheckFeatureSupported (
}
/**
- Enable XD feature.
-
-**/
-VOID
-ActivateXd (
- VOID
- )
-{
- UINT64 MsrRegisters;
-
- MsrRegisters = AsmReadMsr64 (MSR_EFER);
- if ((MsrRegisters & MSR_EFER_XD) != 0) {
- return ;
- }
- MsrRegisters |= MSR_EFER_XD;
- AsmWriteMsr64 (MSR_EFER, MsrRegisters);
-}
-
-/**
Enable single step.
**/
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.h b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.h
index 13ff675..b6fb5cf 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.h
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.h
@@ -97,15 +97,6 @@ CheckFeatureSupported (
);
/**
- Enable XD feature.
-
-**/
-VOID
-ActivateXd (
- VOID
- );
-
-/**
Update page table according to protected memory ranges and the 4KB-page mapped memory ranges.
**/
@@ -114,7 +105,13 @@ InitPaging (
VOID
);
+//
+// The flag indicates if execute-disable is supported by processor.
+//
extern BOOLEAN mXdSupported;
+//
+// The flag indicates if execute-disable is enabled on processor.
+//
extern BOOLEAN mXdEnabled;
#endif // _SMM_PROFILE_H_
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
index 9cee784..b3e50a4 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
@@ -18,6 +18,8 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#define ACC_MAX_BIT BIT3
LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN m1GPageTableSupport = FALSE;
+UINT8 mPhysicalAddressBits;
+BOOLEAN mCpuSmmStaticPageTable;
/**
Check if 1-GByte pages is supported by processor or not.
@@ -86,6 +88,146 @@ GetSubEntriesNum (
}
/**
+ Calculate the maximum support address.
+
+ @return the maximum support address.
+**/
+UINT8
+CalculateMaximumSupportAddress (
+ VOID
+ )
+{
+ UINT32 RegEax;
+ UINT8 PhysicalAddressBits;
+ VOID *Hob;
+
+ //
+ // Get physical address bits supported.
+ //
+ Hob = GetFirstHob (EFI_HOB_TYPE_CPU);
+ if (Hob != NULL) {
+ PhysicalAddressBits = ((EFI_HOB_CPU *) Hob)->SizeOfMemorySpace;
+ } else {
+ AsmCpuid (0x80000000, &RegEax, NULL, NULL, NULL);
+ if (RegEax >= 0x80000008) {
+ AsmCpuid (0x80000008, &RegEax, NULL, NULL, NULL);
+ PhysicalAddressBits = (UINT8) RegEax;
+ } else {
+ PhysicalAddressBits = 36;
+ }
+ }
+
+ //
+ // IA-32e paging translates 48-bit linear addresses to 52-bit physical addresses.
+ //
+ ASSERT (PhysicalAddressBits <= 52);
+ if (PhysicalAddressBits > 48) {
+ PhysicalAddressBits = 48;
+ }
+ return PhysicalAddressBits;
+}
+
+/**
+ Set static page table.
+
+ @param[in] PageTable Address of page table.
+**/
+VOID
+SetStaticPageTable (
+ IN UINTN PageTable
+ )
+{
+ UINT64 PageAddress;
+ UINTN NumberOfPml4EntriesNeeded;
+ UINTN NumberOfPdpEntriesNeeded;
+ UINTN IndexOfPml4Entries;
+ UINTN IndexOfPdpEntries;
+ UINTN IndexOfPageDirectoryEntries;
+ UINT64 *PageMapLevel4Entry;
+ UINT64 *PageMap;
+ UINT64 *PageDirectoryPointerEntry;
+ UINT64 *PageDirectory1GEntry;
+ UINT64 *PageDirectoryEntry;
+
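+  //
+  // Determine how many PML4 and PDPT entries are needed to map the supported
+  // physical address space.
+  //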
+ if (mPhysicalAddressBits <= 39 ) {
+ NumberOfPml4EntriesNeeded = 1;
+ NumberOfPdpEntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 30));
+ } else {
+ NumberOfPml4EntriesNeeded = (UINT32)LShiftU64 (1, (mPhysicalAddressBits - 39));
+ NumberOfPdpEntriesNeeded = 512;
+ }
+
+ //
+ // By architecture only one PageMapLevel4 exists, so use the page table passed in for it.
+ //
+ PageMap = (VOID *) PageTable;
+
+ PageMapLevel4Entry = PageMap;
+ PageAddress = 0;
+ for (IndexOfPml4Entries = 0; IndexOfPml4Entries < NumberOfPml4EntriesNeeded; IndexOfPml4Entries++, PageMapLevel4Entry++) {
+ //
+ // Each PML4 entry points to a page of Page Directory Pointer entries.
+ //
+ PageDirectoryPointerEntry = (UINT64 *) ((*PageMapLevel4Entry) & gPhyMask);
+ if (PageDirectoryPointerEntry == NULL) {
+ PageDirectoryPointerEntry = AllocatePageTableMemory (1);
+ ASSERT(PageDirectoryPointerEntry != NULL);
+ ZeroMem (PageDirectoryPointerEntry, EFI_PAGES_TO_SIZE(1));
+
+ *PageMapLevel4Entry = ((UINTN)PageDirectoryPointerEntry & gPhyMask) | PAGE_ATTRIBUTE_BITS;
+ }
+
+ if (m1GPageTableSupport) {
+ PageDirectory1GEntry = PageDirectoryPointerEntry;
+ for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectory1GEntry++, PageAddress += SIZE_1GB) {
+ if (IndexOfPml4Entries == 0 && IndexOfPageDirectoryEntries < 4) {
+ //
+ // Skip the < 4G entries
+ //
+ continue;
+ }
+ //
+ // Fill in the Page Directory entries
+ //
+ *PageDirectory1GEntry = (PageAddress & gPhyMask) | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
+ }
+ } else {
+ PageAddress = BASE_4GB;
+ for (IndexOfPdpEntries = 0; IndexOfPdpEntries < NumberOfPdpEntriesNeeded; IndexOfPdpEntries++, PageDirectoryPointerEntry++) {
+ if (IndexOfPml4Entries == 0 && IndexOfPdpEntries < 4) {
+ //
+ // Skip the < 4G entries
+ //
+ continue;
+ }
+ //
+ // Each Page Directory Pointer entry points to a page of Page Directory entries.
+ // So allocate space for them and fill them in the IndexOfPageDirectoryEntries loop.
+ //
+ PageDirectoryEntry = (UINT64 *) ((*PageDirectoryPointerEntry) & gPhyMask);
+ if (PageDirectoryEntry == NULL) {
+ PageDirectoryEntry = AllocatePageTableMemory (1);
+ ASSERT(PageDirectoryEntry != NULL);
+ ZeroMem (PageDirectoryEntry, EFI_PAGES_TO_SIZE(1));
+
+ //
+ // Fill in a Page Directory Pointer entry
+ //
+ *PageDirectoryPointerEntry = (UINT64)(UINTN)PageDirectoryEntry | PAGE_ATTRIBUTE_BITS;
+ }
+
+ for (IndexOfPageDirectoryEntries = 0; IndexOfPageDirectoryEntries < 512; IndexOfPageDirectoryEntries++, PageDirectoryEntry++, PageAddress += SIZE_2MB) {
+ //
+ // Fill in the Page Directory entries
+ //
+ *PageDirectoryEntry = (UINT64)PageAddress | IA32_PG_PS | PAGE_ATTRIBUTE_BITS;
+ }
+ }
+ }
+ }
+}
+
+/**
Create PageTable for SMM use.
@return The address of PML4 (to set CR3).
@@ -108,11 +250,17 @@ SmmInitPageTable (
//
InitializeSpinLock (mPFLock);
+ mCpuSmmStaticPageTable = PcdGetBool (PcdCpuSmmStaticPageTable);
m1GPageTableSupport = Is1GPageSupport ();
+ DEBUG ((DEBUG_INFO, "1GPageTableSupport - 0x%x\n", m1GPageTableSupport));
+ DEBUG ((DEBUG_INFO, "PcdCpuSmmStaticPageTable - 0x%x\n", mCpuSmmStaticPageTable));
+
+ mPhysicalAddressBits = CalculateMaximumSupportAddress ();
+ DEBUG ((DEBUG_INFO, "PhysicalAddressBits - 0x%x\n", mPhysicalAddressBits));
//
// Generate PAE page table for the first 4GB memory space
//
- Pages = Gen4GPageTable (PAGE_TABLE_PAGES + 1, FALSE);
+ Pages = Gen4GPageTable (FALSE);
//
// Set IA32_PG_PMNT bit to mask this entry
@@ -125,21 +273,28 @@ SmmInitPageTable (
//
// Fill Page-Table-Level4 (PML4) entry
//
- PTEntry = (UINT64*)(UINTN)(Pages - EFI_PAGES_TO_SIZE (PAGE_TABLE_PAGES + 1));
- *PTEntry = Pages + PAGE_ATTRIBUTE_BITS;
+ PTEntry = (UINT64*)AllocatePageTableMemory (1);
+ ASSERT (PTEntry != NULL);
+ *PTEntry = Pages | PAGE_ATTRIBUTE_BITS;
ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));
+
//
// Set sub-entries number
//
SetSubEntriesNum (PTEntry, 3);
- //
- // Add remaining pages to page pool
- //
- FreePage = (LIST_ENTRY*)(PTEntry + EFI_PAGE_SIZE / sizeof (*PTEntry));
- while ((UINTN)FreePage < Pages) {
- InsertTailList (&mPagePool, FreePage);
- FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
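+ //
+ // With a static page table, the full mapping is built now; otherwise a pool of
+ // free pages is reserved for the on-demand page fault handler.
+ //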
+ if (mCpuSmmStaticPageTable) {
+ SetStaticPageTable ((UINTN)PTEntry);
+ } else {
+ //
+ // Add pages to page pool
+ //
+ FreePage = (LIST_ENTRY*)AllocatePageTableMemory (PAGE_TABLE_PAGES);
+ ASSERT (FreePage != NULL);
+ for (Index = 0; Index < PAGE_TABLE_PAGES; Index++) {
+ InsertTailList (&mPagePool, FreePage);
+ FreePage += EFI_PAGE_SIZE / sizeof (*FreePage);
+ }
}
if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
@@ -561,7 +716,7 @@ SmiDefaultPFHandler (
break;
case SmmPageSize1G:
if (!m1GPageTableSupport) {
- DEBUG ((EFI_D_ERROR, "1-GByte pages is not supported!"));
+ DEBUG ((DEBUG_ERROR, "1-GByte pages is not supported!"));
ASSERT (FALSE);
}
//
@@ -612,8 +767,8 @@ SmiDefaultPFHandler (
// Check if the entry has already existed, this issue may occur when the different
// size page entries created under the same entry
//
- DEBUG ((EFI_D_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
- DEBUG ((EFI_D_ERROR, "New page table overlapped with old page table!\n"));
+ DEBUG ((DEBUG_ERROR, "PageTable = %lx, PTIndex = %x, PageTable[PTIndex] = %lx\n", PageTable, PTIndex, PageTable[PTIndex]));
+ DEBUG ((DEBUG_ERROR, "New page table overlapped with old page table!\n"));
ASSERT (FALSE);
}
//
@@ -654,13 +809,18 @@ SmiPFHandler (
PFAddress = AsmReadCr2 ();
+ if (mCpuSmmStaticPageTable && (PFAddress >= LShiftU64 (1, (mPhysicalAddressBits - 1)))) {
+ DEBUG ((DEBUG_ERROR, "Do not support address 0x%lx by processor!\n", PFAddress));
+ CpuDeadLoop ();
+ }
+
//
// If a page fault occurs in SMRAM range, it should be in a SMM stack guard page.
//
if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
(PFAddress >= mCpuHotPlugData.SmrrBase) &&
(PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
- DEBUG ((EFI_D_ERROR, "SMM stack overflow!\n"));
+ DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
CpuDeadLoop ();
}
@@ -670,7 +830,7 @@ SmiPFHandler (
if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
(PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
if ((SystemContext.SystemContextX64->ExceptionData & IA32_PF_EC_ID) != 0) {
- DEBUG ((EFI_D_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
+ DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%lx) out of SMM range after SMM is locked!\n", PFAddress));
DEBUG_CODE (
DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextX64->Rsp);
);
@@ -689,3 +849,87 @@ SmiPFHandler (
ReleaseSpinLock (mPFLock);
}
+
+/**
+ This function sets memory attribute for page table.
+**/
+VOID
+SetPageTableAttributes (
+ VOID
+ )
+{
+ UINTN Index2;
+ UINTN Index3;
+ UINTN Index4;
+ UINT64 *L1PageTable;
+ UINT64 *L2PageTable;
+ UINT64 *L3PageTable;
+ UINT64 *L4PageTable;
+ BOOLEAN IsSplitted;
+ BOOLEAN PageTableSplitted;
+
+ if (!mCpuSmmStaticPageTable) {
+ return ;
+ }
+
+ DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));
+
+ //
+ // Disable write protection, because we need to mark the page table as read-only.
+ // We must *write* page table memory in order to mark it *read only*.
+ //
+ AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);
+
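+  //
+  // Marking a page table page read-only may itself split a large page and allocate
+  // new page table pages, so iterate until no further splits occur.
+  //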
+ do {
+ DEBUG ((DEBUG_INFO, "Start...\n"));
+ PageTableSplitted = FALSE;
+
+ L4PageTable = (UINT64 *)GetPageTableBase ();
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+
+ for (Index4 = 0; Index4 < SIZE_4KB/sizeof(UINT64); Index4++) {
+ L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & PAGING_4K_ADDRESS_MASK_64);
+ if (L3PageTable == NULL) {
+ continue;
+ }
+
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+
+ for (Index3 = 0; Index3 < SIZE_4KB/sizeof(UINT64); Index3++) {
+ if ((L3PageTable[Index3] & IA32_PG_PS) != 0) {
+ // 1G
+ continue;
+ }
+ L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & PAGING_4K_ADDRESS_MASK_64);
+ if (L2PageTable == NULL) {
+ continue;
+ }
+
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+
+ for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
+ if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
+ // 2M
+ continue;
+ }
+ L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & PAGING_4K_ADDRESS_MASK_64);
+ if (L1PageTable == NULL) {
+ continue;
+ }
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+ }
+ }
+ }
+ } while (PageTableSplitted);
+
+ //
+ // Re-enable write protection after the page table has been updated.
+ //
+ AsmWriteCr0 (AsmReadCr0() | CR0_WP);
+
+ return ;
+}
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.S b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.S
index 7e9ac58..f4761b0 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.S
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.S
@@ -1,6 +1,6 @@
#------------------------------------------------------------------------------
#
-# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
@@ -24,8 +24,13 @@ ASM_GLOBAL ASM_PFX(gcSmiHandlerSize)
ASM_GLOBAL ASM_PFX(gSmiCr3)
ASM_GLOBAL ASM_PFX(gSmiStack)
ASM_GLOBAL ASM_PFX(gSmbase)
+ASM_GLOBAL ASM_PFX(mXdSupported)
ASM_GLOBAL ASM_PFX(gSmiHandlerIdtr)
+.equ MSR_IA32_MISC_ENABLE, 0x1A0
+.equ MSR_EFER, 0xc0000080
+.equ MSR_EFER_XD, 0x800
+
#
# Constants relating to PROCESSOR_SMM_DESCRIPTOR
#
@@ -132,6 +137,32 @@ ASM_PFX(gSmiCr3): .space 4
movl $TSS_SEGMENT, %eax
ltr %ax
+# enable NXE if supported
+ .byte 0xb0 # mov al, imm8
+ASM_PFX(mXdSupported): .byte 1
+ cmpb $0, %al
+ jz SkipNxe
+#
+# Check XD disable bit
+#
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ rdmsr
+ subl $4, %esp
+ pushq %rdx # save MSR_IA32_MISC_ENABLE[63-32]
+ testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34]
+ jz L13
+ andw $0x0FFFB, %dx # clear XD Disable bit if it is set
+ wrmsr
+L13:
+ movl $MSR_EFER, %ecx
+ rdmsr
+ orw $MSR_EFER_XD,%ax # enable NXE
+ wrmsr
+ jmp NxeDone
+SkipNxe:
+ subl $8, %esp
+NxeDone:
+
#
# Switch to LongMode
#
@@ -139,12 +170,13 @@ ASM_PFX(gSmiCr3): .space 4
call Base # push return address for retf later
Base:
addl $(LongMode - Base), (%rsp) # offset for far retf, seg is the 1st arg
- movl $0xc0000080, %ecx
+
+ movl $MSR_EFER, %ecx
rdmsr
- orb $1,%ah
+ orb $1,%ah # enable LME
wrmsr
movq %cr0, %rbx
- orl $0x080010000, %ebx # enable paging + WP
+ orl $0x080010023, %ebx # enable paging + WP + NE + MP + PE
movq %rbx, %cr0
retf
LongMode: # long mode (64-bit code) starts here
@@ -162,10 +194,10 @@ LongMode: # long mode (64-bit code) starts here
# jmp _SmiHandler ; instruction is not needed
_SmiHandler:
- movq (%rsp), %rbx
+ movq 8(%rsp), %rbx
# Save FP registers
- subq $0x208, %rsp
+ subq $0x200, %rsp
.byte 0x48 # FXSAVE64
fxsave (%rsp)
@@ -191,6 +223,21 @@ _SmiHandler:
.byte 0x48 # FXRSTOR64
fxrstor (%rsp)
+ addq $0x200, %rsp
+
+ movabsq $ASM_PFX(mXdSupported), %rax
+ movb (%rax), %al
+ cmpb $0, %al
+ jz L16
+ popq %rdx # get saved MSR_IA32_MISC_ENABLE[63-32]
+ testl $BIT2, %edx
+ jz L16
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ rdmsr
+ orw $BIT2, %dx # set XD Disable bit if it was set before entering SMM
+ wrmsr
+
+L16:
rsm
ASM_PFX(gcSmiHandlerSize): .word . - _SmiEntryPoint
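Roughly, the NXE handling added to the entry/exit paths above does the following, sketched here in C. This is not part of the patch: the real code runs in the SMI entry stub, skips everything when the patched mXdSupported byte is 0, and parks MSR_IA32_MISC_ENABLE[63:32] on the stack rather than returning it. If the OS had set the XD Disable bit, IA32_MISC_ENABLE[34], it is cleared so that EFER.NXE can be enabled, and the bit is put back just before RSM. The function names and SKETCH_* macros below are illustrative only; the MSR intrinsics are the standard BaseLib ones.

#define SKETCH_MSR_IA32_MISC_ENABLE  0x1A0
#define SKETCH_MSR_EFER              0xC0000080
#define SKETCH_EFER_NXE              BIT11    // same bit as MSR_EFER_XD (0x800) above
#define SKETCH_XD_DISABLE            BIT34    // IA32_MISC_ENABLE[34], "XD Bit Disable"

BOOLEAN
EnableNxeOnSmiEntry (
  VOID
  )
{
  UINT64   MiscEnable;
  BOOLEAN  XdWasDisabled;

  MiscEnable    = AsmReadMsr64 (SKETCH_MSR_IA32_MISC_ENABLE);
  XdWasDisabled = (BOOLEAN)((MiscEnable & SKETCH_XD_DISABLE) != 0);
  if (XdWasDisabled) {
    //
    // Clear XD Disable so that setting EFER.NXE below actually takes effect.
    //
    AsmWriteMsr64 (SKETCH_MSR_IA32_MISC_ENABLE, MiscEnable & ~SKETCH_XD_DISABLE);
  }
  AsmWriteMsr64 (SKETCH_MSR_EFER, AsmReadMsr64 (SKETCH_MSR_EFER) | SKETCH_EFER_NXE);
  return XdWasDisabled;
}

VOID
RestoreXdOnSmiExit (
  IN BOOLEAN  XdWasDisabled
  )
{
  if (XdWasDisabled) {
    //
    // Restore the XD Disable bit to the state the OS left it in, before RSM.
    //
    AsmWriteMsr64 (
      SKETCH_MSR_IA32_MISC_ENABLE,
      AsmReadMsr64 (SKETCH_MSR_IA32_MISC_ENABLE) | SKETCH_XD_DISABLE
      );
  }
}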
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.asm b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.asm
index 094cf2c..e2fcb6f 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.asm
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.asm
@@ -1,5 +1,5 @@
;------------------------------------------------------------------------------ ;
-; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at
@@ -29,8 +29,12 @@ EXTERNDEF gcSmiHandlerSize:WORD
EXTERNDEF gSmiCr3:DWORD
EXTERNDEF gSmiStack:DWORD
EXTERNDEF gSmbase:DWORD
+EXTERNDEF mXdSupported:BYTE
EXTERNDEF gSmiHandlerIdtr:FWORD
+MSR_IA32_MISC_ENABLE EQU 1A0h
+MSR_EFER EQU 0c0000080h
+MSR_EFER_XD EQU 0800h
;
; Constants relating to PROCESSOR_SMM_DESCRIPTOR
@@ -130,17 +134,44 @@ gSmiCr3 DD ?
mov eax, TSS_SEGMENT
ltr ax
+; enable NXE if supported
+ DB 0b0h ; mov al, imm8
+mXdSupported DB 1
+ cmp al, 0
+ jz @SkipXd
+;
+; Check XD disable bit
+;
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ sub esp, 4
+ push rdx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz @f
+ and dx, 0FFFBh ; clear XD Disable bit if it is set
+ wrmsr
+@@:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+ jmp @XdDone
+@SkipXd:
+ sub esp, 8
+@XdDone:
+
; Switch into @LongMode
push LONG_MODE_CS ; push cs hardcore here
call Base ; push return address for retf later
Base:
add dword ptr [rsp], @LongMode - Base; offset for far retf, seg is the 1st arg
- mov ecx, 0c0000080h
+
+ mov ecx, MSR_EFER
rdmsr
- or ah, 1
+ or ah, 1 ; enable LME
wrmsr
mov rbx, cr0
- or ebx, 080010000h ; enable paging + WP
+ or ebx, 080010023h ; enable paging + WP + NE + MP + PE
mov cr0, rbx
retf
@LongMode: ; long mode (64-bit code) starts here
@@ -163,7 +194,7 @@ _SmiHandler:
;
; Save FP registers
;
- sub rsp, 208h
+ sub rsp, 200h
DB 48h ; FXSAVE64
fxsave [rsp]
@@ -172,15 +203,15 @@ _SmiHandler:
mov rcx, rbx
mov rax, CpuSmmDebugEntry
call rax
-
+
mov rcx, rbx
mov rax, SmiRendezvous ; rax <- absolute addr of SmiRedezvous
call rax
-
+
mov rcx, rbx
mov rax, CpuSmmDebugExit
call rax
-
+
add rsp, 20h
;
@@ -189,6 +220,21 @@ _SmiHandler:
DB 48h ; FXRSTOR64
fxrstor [rsp]
+ add rsp, 200h
+
+ lea rax, mXdSupported
+ mov al, [rax]
+ cmp al, 0
+ jz @f
+ pop rdx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz @f
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering SMM
+ wrmsr
+
+@@:
rsm
gcSmiHandlerSize DW $ - _SmiEntryPoint
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
index b717cda..c3c094f 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
@@ -22,6 +22,10 @@
; Variables referrenced by C code
;
+%define MSR_IA32_MISC_ENABLE 0x1A0
+%define MSR_EFER 0xc0000080
+%define MSR_EFER_XD 0x800
+
;
; Constants relating to PROCESSOR_SMM_DESCRIPTOR
;
@@ -50,6 +54,7 @@ extern ASM_PFX(CpuSmmDebugEntry)
extern ASM_PFX(CpuSmmDebugExit)
global ASM_PFX(gSmbase)
+global ASM_PFX(mXdSupported)
global ASM_PFX(gSmiStack)
global ASM_PFX(gSmiCr3)
global ASM_PFX(gcSmiHandlerTemplate)
@@ -69,7 +74,7 @@ _SmiEntryPoint:
mov [cs:bx + 2], eax
o32 lgdt [cs:bx] ; lgdt fword ptr cs:[bx]
mov ax, PROTECT_MODE_CS
- mov [cs:bx-0x2],ax
+ mov [cs:bx-0x2],ax
DB 0x66, 0xbf ; mov edi, SMBASE
ASM_PFX(gSmbase): DD 0
lea eax, [edi + (@ProtectedMode - _SmiEntryPoint) + 0x8000]
@@ -79,7 +84,7 @@ ASM_PFX(gSmbase): DD 0
or ebx, 0x23
mov cr0, ebx
jmp dword 0x0:0x0
-_GdtDesc:
+_GdtDesc:
DW 0
DD 0
@@ -112,17 +117,44 @@ ASM_PFX(gSmiCr3): DD 0
mov eax, TSS_SEGMENT
ltr ax
+; enable NXE if supported
+ DB 0xb0 ; mov al, imm8
+ASM_PFX(mXdSupported): DB 1
+ cmp al, 0
+ jz @SkipXd
+;
+; Check XD disable bit
+;
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ sub esp, 4
+ push rdx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz .0
+ and dx, 0xFFFB ; clear XD Disable bit if it is set
+ wrmsr
+.0:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+ jmp @XdDone
+@SkipXd:
+ sub esp, 8
+@XdDone:
+
; Switch into @LongMode
push LONG_MODE_CS ; push cs hardcore here
- call Base ; push reture address for retf later
+ call Base ; push return address for retf later
Base:
add dword [rsp], @LongMode - Base; offset for far retf, seg is the 1st arg
- mov ecx, 0xc0000080
+
+ mov ecx, MSR_EFER
rdmsr
- or ah, 1
+ or ah, 1 ; enable LME
wrmsr
mov rbx, cr0
- or ebx, 080010000h ; enable paging + WP
+ or ebx, 0x80010023 ; enable paging + WP + NE + MP + PE
mov cr0, rbx
retf
@LongMode: ; long mode (64-bit code) starts here
@@ -140,12 +172,12 @@ Base:
; jmp _SmiHandler ; instruction is not needed
_SmiHandler:
- mov rbx, [rsp] ; rbx <- CpuIndex
+ mov rbx, [rsp + 0x8] ; rbx <- CpuIndex
;
; Save FP registers
;
- sub rsp, 0x208
+ sub rsp, 0x200
DB 0x48 ; FXSAVE64
fxsave [rsp]
@@ -154,15 +186,15 @@ _SmiHandler:
mov rcx, rbx
mov rax, CpuSmmDebugEntry
call rax
-
+
mov rcx, rbx
mov rax, SmiRendezvous ; rax <- absolute addr of SmiRedezvous
call rax
-
+
mov rcx, rbx
mov rax, CpuSmmDebugExit
call rax
-
+
add rsp, 0x20
;
@@ -171,6 +203,21 @@ _SmiHandler:
DB 0x48 ; FXRSTOR64
fxrstor [rsp]
+ add rsp, 0x200
+
+ mov rax, ASM_PFX(mXdSupported)
+ mov al, [rax]
+ cmp al, 0
+ jz .1
+ pop rdx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz .1
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering SMM
+ wrmsr
+
+.1:
rsm
gcSmiHandlerSize DW $ - _SmiEntryPoint
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.S b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.S
index 2ae6f2c..2e2792d 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.S
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.S
@@ -1,6 +1,6 @@
#------------------------------------------------------------------------------
#
-# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
@@ -128,244 +128,8 @@ ASM_PFX(gcSmiGdtr):
.quad NullSeg
ASM_PFX(gcSmiIdtr):
- .word IDT_SIZE - 1
- .quad _SmiIDT
-
-
-#
-# Here is the IDT. There are 32 (not 255) entries in it since only processor
-# generated exceptions will be handled.
-#
-_SmiIDT:
-# The following segment repeats 32 times:
-# No. 1
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 2
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 3
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 4
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 5
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 6
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 7
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 8
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 9
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 10
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 11
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 12
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 13
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 14
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 15
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 16
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 17
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 18
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 19
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 20
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 21
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 22
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 23
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 24
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 25
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 26
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 27
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 28
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 29
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 30
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 31
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-# No. 32
- .word 0 # Offset 0:15
- .word CODE_SEL
- .byte 0 # Unused
- .byte 0x8e # Interrupt Gate, Present
- .word 0 # Offset 16:31
- .quad 0 # Offset 32:63
-
-_SmiIDTEnd:
-
-.equ IDT_SIZE, (_SmiIDTEnd - _SmiIDT)
+ .word 0
+ .quad 0
.text
@@ -600,11 +364,3 @@ L5:
addq $16, %rsp # skip INT# & ErrCode
iretq
-ASM_GLOBAL ASM_PFX(InitializeIDTSmmStackGuard)
-ASM_PFX(InitializeIDTSmmStackGuard):
-# If SMM Stack Guard feature is enabled, set the IST field of
-# the interrupt gate for Page Fault Exception to be 1
-#
- movabsq $_SmiIDT + 14 * 16, %rax
- movb $1, 4(%rax)
- ret
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.asm b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.asm
index ab71645..f55ba72 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.asm
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.asm
@@ -1,5 +1,5 @@
;------------------------------------------------------------------------------ ;
-; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at
@@ -144,27 +144,8 @@ gcSmiGdtr LABEL FWORD
DQ offset NullSeg
gcSmiIdtr LABEL FWORD
- DW IDT_SIZE - 1
- DQ offset _SmiIDT
-
- .data
-
-;
-; Here is the IDT. There are 32 (not 255) entries in it since only processor
-; generated exceptions will be handled.
-;
-_SmiIDT:
-REPEAT 32
- DW 0 ; Offset 0:15
- DW CODE_SEL ; Segment selector
- DB 0 ; Unused
- DB 8eh ; Interrupt Gate, Present
- DW 0 ; Offset 16:31
- DQ 0 ; Offset 32:63
- ENDM
-_SmiIDTEnd:
-
-IDT_SIZE = (offset _SmiIDTEnd - offset _SmiIDT)
+ DW 0
+ DQ 0
.code
@@ -400,14 +381,4 @@ PageFaultIdtHandlerSmmProfile PROC
iretq
PageFaultIdtHandlerSmmProfile ENDP
-InitializeIDTSmmStackGuard PROC
-;
-; If SMM Stack Guard feature is enabled, set the IST field of
-; the interrupt gate for Page Fault Exception to be 1
-;
- lea rax, _SmiIDT + 14 * 16
- mov byte ptr [rax + 4], 1
- ret
-InitializeIDTSmmStackGuard ENDP
-
END
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm
index 821ee18..bc8d95d 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiException.nasm
@@ -145,25 +145,8 @@ ASM_PFX(gcSmiGdtr):
DQ NullSeg
ASM_PFX(gcSmiIdtr):
- DW IDT_SIZE - 1
- DQ _SmiIDT
-
-;
-; Here is the IDT. There are 32 (not 255) entries in it since only processor
-; generated exceptions will be handled.
-;
-_SmiIDT:
-%rep 32
- DW 0 ; 0:15
- DW CODE_SEL ; Segment selector
- DB 0 ; Unused
- DB 0x8e ; Interrupt Gate, Present
- DW 0 ; 16:31
- DQ 0 ; 32:63
-%endrep
-_SmiIDTEnd:
-
-IDT_SIZE equ _SmiIDTEnd - _SmiIDT
+ DW 0
+ DQ 0
DEFAULT REL
SECTION .text
@@ -400,13 +383,3 @@ ASM_PFX(PageFaultIdtHandlerSmmProfile):
add rsp, 16 ; skip INT# & ErrCode
iretq
-global ASM_PFX(InitializeIDTSmmStackGuard)
-ASM_PFX(InitializeIDTSmmStackGuard):
-;
-; If SMM Stack Guard feature is enabled, set the IST field of
-; the interrupt gate for Page Fault Exception to be 1
-;
- lea rax, [_SmiIDT + 14 * 16]
- mov byte [rax + 4], 1
- ret
-
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c
index b53aa45..e2eca73 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmFuncsArch.c
@@ -1,7 +1,7 @@
/** @file
SMM CPU misc functions for x64 arch specific.
-Copyright (c) 2015, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2015 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
@@ -14,6 +14,30 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#include "PiSmmCpuDxeSmm.h"
+EFI_PHYSICAL_ADDRESS mGdtBuffer;
+UINTN mGdtBufferSize;
+
+/**
+ Initialize IDT for SMM Stack Guard.
+
+**/
+VOID
+EFIAPI
+InitializeIDTSmmStackGuard (
+ VOID
+ )
+{
+ IA32_IDT_GATE_DESCRIPTOR *IdtGate;
+
+ //
+ // If SMM Stack Guard feature is enabled, set the IST field of
+ // the interrupt gate for Page Fault Exception to be 1
+ //
+ IdtGate = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
+ IdtGate += EXCEPT_IA32_PAGE_FAULT;
+ IdtGate->Bits.Reserved_0 = 1;
+}
+
/**
Initialize Gdt for all processors.
@@ -41,8 +65,10 @@ InitGdt (
// on each SMI entry.
//
GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE + 7) & ~7; // 8 bytes aligned
- GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus));
+ mGdtBufferSize = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
+ GdtTssTables = (UINT8*)AllocateCodePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
ASSERT (GdtTssTables != NULL);
+ mGdtBuffer = (UINTN)GdtTssTables;
GdtTableStepSize = GdtTssTableSize;
for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
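One note on the new InitializeIDTSmmStackGuard above: in the x64 IA32_IDT_GATE_DESCRIPTOR from MdePkg, the field the structure calls Reserved_0 is byte 4 of the gate, and its low three bits hold the interrupt-stack-table index, so IdtGate->Bits.Reserved_0 = 1 selects IST1 for the page-fault vector. That gives the same effect as the removed assembly helpers' "mov byte ptr [rax + 4], 1". A raw-byte sketch of the equivalence (illustrative only; IdtBase and the function name are hypothetical):

VOID
ExampleSetPageFaultIst1 (
  IN UINTN  IdtBase
  )
{
  UINT8  *PfGate;

  //
  // Gate 14 (#PF); byte 4 of an x64 interrupt gate holds the IST index in bits [2:0].
  //
  PfGate    = (UINT8 *)(IdtBase + EXCEPT_IA32_PAGE_FAULT * sizeof (IA32_IDT_GATE_DESCRIPTOR));
  PfGate[4] = 1;
}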
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
index 065fb2c..cc393dc 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmProfileArch.c
@@ -1,7 +1,7 @@
/** @file
X64 processor specific functions to enable SMM profile.
-Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2012 - 2016, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
@@ -45,12 +45,13 @@ InitSmmS3Cr3 (
//
// Generate PAE page table for the first 4GB memory space
//
- Pages = Gen4GPageTable (1, FALSE);
+ Pages = Gen4GPageTable (FALSE);
//
// Fill Page-Table-Level4 (PML4) entry
//
- PTEntry = (UINT64*)(UINTN)(Pages - EFI_PAGES_TO_SIZE (1));
+ PTEntry = (UINT64*)AllocatePageTableMemory (1);
+ ASSERT (PTEntry != NULL);
*PTEntry = Pages | PAGE_ATTRIBUTE_BITS;
ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));
--
2.7.4.windows.1