* [PATCH] UefiCpuPkg/PiSmmCpu: Enable 5L paging only when phy addr line > 48
@ 2019-09-05 22:19 Ni, Ray
2019-09-09 0:51 ` Dong, Eric
0 siblings, 1 reply; 2+ messages in thread
From: Ni, Ray @ 2019-09-05 22:19 UTC (permalink / raw)
To: devel; +Cc: Liming Gao, Eric Dong, Laszlo Ersek
Today's behavior is to enable 5-level paging when the CPU supports it
(CPUID[7,0].ECX.BIT[16] is set).
The patch changes the behavior to enable 5-level paging when two
conditions are both met:
1. CPU supports it;
2. The max physical address bits is bigger than 48.
Because 4-level paging can address physical addresses up to
2^48 - 1, there is no need to enable 5-level paging when the max
physical address bits is <= 48.
Signed-off-by: Ray Ni <ray.ni@intel.com>
Cc: Liming Gao <liming.gao@intel.com>
Cc: Eric Dong <Eric.dong@intel.com>
Cc: Laszlo Ersek <lersek@redhat.com>
---
UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c | 57 +++++++++++++--------
UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm | 4 +-
2 files changed, 39 insertions(+), 22 deletions(-)
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
index 733d107efd..e5c4788c13 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
@@ -16,8 +16,8 @@ SPDX-License-Identifier: BSD-2-Clause-Patent
LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE (mPagePool);
BOOLEAN m1GPageTableSupport = FALSE;
BOOLEAN mCpuSmmRestrictedMemoryAccess;
-BOOLEAN m5LevelPagingSupport;
-X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingSupport;
+BOOLEAN m5LevelPagingNeeded;
+X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingNeeded;
/**
Disable CET.
@@ -63,28 +63,45 @@ Is1GPageSupport (
}
/**
- Check if 5-level paging is supported by processor or not.
-
- @retval TRUE 5-level paging is supported.
- @retval FALSE 5-level paging is not supported.
+ The routine returns TRUE when CPU supports it (CPUID[7,0].ECX.BIT[16] is set) and
+ the max physical address bits is bigger than 48. Because 4-level paging can support
+ to address physical address up to 2^48 - 1, there is no need to enable 5-level paging
+ with max physical address bits <= 48.
+ @retval TRUE 5-level paging enabling is needed.
+ @retval FALSE 5-level paging enabling is not needed.
**/
BOOLEAN
-Is5LevelPagingSupport (
+Is5LevelPagingNeeded (
VOID
)
{
- CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX EcxFlags;
+ CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize;
+ CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX ExtFeatureEcx;
+ UINT32 MaxExtendedFunctionId;
+ AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL, NULL, NULL);
+ if (MaxExtendedFunctionId >= CPUID_VIR_PHY_ADDRESS_SIZE) {
+ AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32, NULL, NULL, NULL);
+ } else {
+ VirPhyAddressSize.Bits.PhysicalAddressBits = 36;
+ }
AsmCpuidEx (
CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
- NULL,
- NULL,
- &EcxFlags.Uint32,
- NULL
+ NULL, NULL, &ExtFeatureEcx.Uint32, NULL
);
- return (BOOLEAN) (EcxFlags.Bits.FiveLevelPage != 0);
+ DEBUG ((
+ DEBUG_INFO, "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
+ VirPhyAddressSize.Bits.PhysicalAddressBits, ExtFeatureEcx.Bits.FiveLevelPage
+ ));
+
+ if (VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) {
+ ASSERT (ExtFeatureEcx.Bits.FiveLevelPage == 1);
+ return TRUE;
+ } else {
+ return FALSE;
+ }
}
/**
@@ -190,7 +207,7 @@ SetStaticPageTable (
// when 5-Level Paging is disabled.
//
ASSERT (mPhysicalAddressBits <= 52);
- if (!m5LevelPagingSupport && mPhysicalAddressBits > 48) {
+ if (!m5LevelPagingNeeded && mPhysicalAddressBits > 48) {
mPhysicalAddressBits = 48;
}
@@ -217,7 +234,7 @@ SetStaticPageTable (
PageMapLevel4Entry = PageMap;
PageMapLevel5Entry = NULL;
- if (m5LevelPagingSupport) {
+ if (m5LevelPagingNeeded) {
//
// By architecture only one PageMapLevel5 exists - so lets allocate storage for it.
//
@@ -233,7 +250,7 @@ SetStaticPageTable (
// So lets allocate space for them and fill them in in the IndexOfPml4Entries loop.
// When 5-Level Paging is disabled, below allocation happens only once.
//
- if (m5LevelPagingSupport) {
+ if (m5LevelPagingNeeded) {
PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) & ~mAddressEncMask & gPhyMask);
if (PageMapLevel4Entry == NULL) {
PageMapLevel4Entry = AllocatePageTableMemory (1);
@@ -336,10 +353,10 @@ SmmInitPageTable (
mCpuSmmRestrictedMemoryAccess = PcdGetBool (PcdCpuSmmRestrictedMemoryAccess);
m1GPageTableSupport = Is1GPageSupport ();
- m5LevelPagingSupport = Is5LevelPagingSupport ();
+ m5LevelPagingNeeded = Is5LevelPagingNeeded ();
mPhysicalAddressBits = CalculateMaximumSupportAddress ();
- PatchInstructionX86 (gPatch5LevelPagingSupport, m5LevelPagingSupport, 1);
- DEBUG ((DEBUG_INFO, "5LevelPaging Support - %d\n", m5LevelPagingSupport));
+ PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);
+ DEBUG ((DEBUG_INFO, "5LevelPaging Needed - %d\n", m5LevelPagingNeeded));
DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n", m1GPageTableSupport));
DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n", mCpuSmmRestrictedMemoryAccess));
DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n", mPhysicalAddressBits));
@@ -370,7 +387,7 @@ SmmInitPageTable (
SetSubEntriesNum (Pml4Entry, 3);
PTEntry = Pml4Entry;
- if (m5LevelPagingSupport) {
+ if (m5LevelPagingNeeded) {
//
// Fill PML5 entry
//
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
index 271492a9d7..db06d22d51 100644
--- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
@@ -69,7 +69,7 @@ extern ASM_PFX(mXdSupported)
global ASM_PFX(gPatchXdSupported)
global ASM_PFX(gPatchSmiStack)
global ASM_PFX(gPatchSmiCr3)
-global ASM_PFX(gPatch5LevelPagingSupport)
+global ASM_PFX(gPatch5LevelPagingNeeded)
global ASM_PFX(gcSmiHandlerTemplate)
global ASM_PFX(gcSmiHandlerSize)
@@ -127,7 +127,7 @@ ASM_PFX(gPatchSmiCr3):
mov eax, 0x668 ; as cr4.PGE is not set here, refresh cr3
mov cl, strict byte 0 ; source operand will be patched
-ASM_PFX(gPatch5LevelPagingSupport):
+ASM_PFX(gPatch5LevelPagingNeeded):
cmp cl, 0
je SkipEnable5LevelPaging
;
--
2.21.0.windows.1
^ permalink raw reply related [flat|nested] 2+ messages in thread
* Re: [PATCH] UefiCpuPkg/PiSmmCpu: Enable 5L paging only when phy addr line > 48
2019-09-05 22:19 [PATCH] UefiCpuPkg/PiSmmCpu: Enable 5L paging only when phy addr line > 48 Ni, Ray
@ 2019-09-09 0:51 ` Dong, Eric
0 siblings, 0 replies; 2+ messages in thread
From: Dong, Eric @ 2019-09-09 0:51 UTC (permalink / raw)
To: Ni, Ray, devel@edk2.groups.io; +Cc: Gao, Liming, Laszlo Ersek
Reviewed-by: Eric Dong <eric.dong@intel.com>
> -----Original Message-----
> From: Ni, Ray
> Sent: Friday, September 6, 2019 6:19 AM
> To: devel@edk2.groups.io
> Cc: Gao, Liming <liming.gao@intel.com>; Dong, Eric <eric.dong@intel.com>;
> Laszlo Ersek <lersek@redhat.com>
> Subject: [PATCH] UefiCpuPkg/PiSmmCpu: Enable 5L paging only when phy addr
> line > 48
>
> Today's behavior is to enable 5l paging when CPU supports it
> (CPUID[7,0].ECX.BIT[16] is set).
>
> The patch changes the behavior to enable 5l paging when two conditions are
> both met:
> 1. CPU supports it;
> 2. The max physical address bits is bigger than 48.
>
> Because 4-level paging can support to address physical address up to
> 2^48 - 1, there is no need to enable 5-level paging with max physical address
> bits <= 48.
>
> Signed-off-by: Ray Ni <ray.ni@intel.com>
> Cc: Liming Gao <liming.gao@intel.com>
> Cc: Eric Dong <Eric.dong@intel.com>
> Cc: Laszlo Ersek <lersek@redhat.com>
> ---
> UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c | 57 +++++++++++++--------
> UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm | 4 +-
> 2 files changed, 39 insertions(+), 22 deletions(-)
>
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> index 733d107efd..e5c4788c13 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/PageTbl.c
> @@ -16,8 +16,8 @@ SPDX-License-Identifier: BSD-2-Clause-Patent
> LIST_ENTRY mPagePool = INITIALIZE_LIST_HEAD_VARIABLE
> (mPagePool);
> BOOLEAN m1GPageTableSupport = FALSE;
> BOOLEAN mCpuSmmRestrictedMemoryAccess;
> -BOOLEAN m5LevelPagingSupport;
> -X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingSupport;
> +BOOLEAN m5LevelPagingNeeded;
> +X86_ASSEMBLY_PATCH_LABEL gPatch5LevelPagingNeeded;
>
> /**
> Disable CET.
> @@ -63,28 +63,45 @@ Is1GPageSupport (
> }
>
> /**
> - Check if 5-level paging is supported by processor or not.
> -
> - @retval TRUE 5-level paging is supported.
> - @retval FALSE 5-level paging is not supported.
> + The routine returns TRUE when CPU supports it (CPUID[7,0].ECX.BIT[16]
> + is set) and the max physical address bits is bigger than 48. Because
> + 4-level paging can support to address physical address up to 2^48 -
> + 1, there is no need to enable 5-level paging with max physical address bits
> <= 48.
>
> + @retval TRUE 5-level paging enabling is needed.
> + @retval FALSE 5-level paging enabling is not needed.
> **/
> BOOLEAN
> -Is5LevelPagingSupport (
> +Is5LevelPagingNeeded (
> VOID
> )
> {
> - CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX EcxFlags;
> + CPUID_VIR_PHY_ADDRESS_SIZE_EAX VirPhyAddressSize;
> + CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_ECX ExtFeatureEcx;
> + UINT32 MaxExtendedFunctionId;
>
> + AsmCpuid (CPUID_EXTENDED_FUNCTION, &MaxExtendedFunctionId, NULL,
> + NULL, NULL); if (MaxExtendedFunctionId >=
> CPUID_VIR_PHY_ADDRESS_SIZE) {
> + AsmCpuid (CPUID_VIR_PHY_ADDRESS_SIZE, &VirPhyAddressSize.Uint32,
> + NULL, NULL, NULL); } else {
> + VirPhyAddressSize.Bits.PhysicalAddressBits = 36; }
> AsmCpuidEx (
> CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
> CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO,
> - NULL,
> - NULL,
> - &EcxFlags.Uint32,
> - NULL
> + NULL, NULL, &ExtFeatureEcx.Uint32, NULL
> );
> - return (BOOLEAN) (EcxFlags.Bits.FiveLevelPage != 0);
> + DEBUG ((
> + DEBUG_INFO, "PhysicalAddressBits = %d, 5LPageTable = %d.\n",
> + VirPhyAddressSize.Bits.PhysicalAddressBits,
> ExtFeatureEcx.Bits.FiveLevelPage
> + ));
> +
> + if (VirPhyAddressSize.Bits.PhysicalAddressBits > 4 * 9 + 12) {
> + ASSERT (ExtFeatureEcx.Bits.FiveLevelPage == 1);
> + return TRUE;
> + } else {
> + return FALSE;
> + }
> }
>
> /**
> @@ -190,7 +207,7 @@ SetStaticPageTable (
> // when 5-Level Paging is disabled.
> //
> ASSERT (mPhysicalAddressBits <= 52);
> - if (!m5LevelPagingSupport && mPhysicalAddressBits > 48) {
> + if (!m5LevelPagingNeeded && mPhysicalAddressBits > 48) {
> mPhysicalAddressBits = 48;
> }
>
> @@ -217,7 +234,7 @@ SetStaticPageTable (
>
> PageMapLevel4Entry = PageMap;
> PageMapLevel5Entry = NULL;
> - if (m5LevelPagingSupport) {
> + if (m5LevelPagingNeeded) {
> //
> // By architecture only one PageMapLevel5 exists - so lets allocate storage
> for it.
> //
> @@ -233,7 +250,7 @@ SetStaticPageTable (
> // So lets allocate space for them and fill them in in the IndexOfPml4Entries
> loop.
> // When 5-Level Paging is disabled, below allocation happens only once.
> //
> - if (m5LevelPagingSupport) {
> + if (m5LevelPagingNeeded) {
> PageMapLevel4Entry = (UINT64 *) ((*PageMapLevel5Entry) &
> ~mAddressEncMask & gPhyMask);
> if (PageMapLevel4Entry == NULL) {
> PageMapLevel4Entry = AllocatePageTableMemory (1); @@ -336,10
> +353,10 @@ SmmInitPageTable (
>
> mCpuSmmRestrictedMemoryAccess = PcdGetBool
> (PcdCpuSmmRestrictedMemoryAccess);
> m1GPageTableSupport = Is1GPageSupport ();
> - m5LevelPagingSupport = Is5LevelPagingSupport ();
> + m5LevelPagingNeeded = Is5LevelPagingNeeded ();
> mPhysicalAddressBits = CalculateMaximumSupportAddress ();
> - PatchInstructionX86 (gPatch5LevelPagingSupport, m5LevelPagingSupport, 1);
> - DEBUG ((DEBUG_INFO, "5LevelPaging Support - %d\n",
> m5LevelPagingSupport));
> + PatchInstructionX86 (gPatch5LevelPagingNeeded, m5LevelPagingNeeded, 1);
> + DEBUG ((DEBUG_INFO, "5LevelPaging Needed - %d\n",
> m5LevelPagingNeeded));
> DEBUG ((DEBUG_INFO, "1GPageTable Support - %d\n",
> m1GPageTableSupport));
> DEBUG ((DEBUG_INFO, "PcdCpuSmmRestrictedMemoryAccess - %d\n",
> mCpuSmmRestrictedMemoryAccess));
> DEBUG ((DEBUG_INFO, "PhysicalAddressBits - %d\n",
> mPhysicalAddressBits));
> @@ -370,7 +387,7 @@ SmmInitPageTable (
> SetSubEntriesNum (Pml4Entry, 3);
> PTEntry = Pml4Entry;
>
> - if (m5LevelPagingSupport) {
> + if (m5LevelPagingNeeded) {
> //
> // Fill PML5 entry
> //
> diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
> b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
> index 271492a9d7..db06d22d51 100644
> --- a/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
> +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmiEntry.nasm
> @@ -69,7 +69,7 @@ extern ASM_PFX(mXdSupported) global
> ASM_PFX(gPatchXdSupported) global ASM_PFX(gPatchSmiStack) global
> ASM_PFX(gPatchSmiCr3) -global ASM_PFX(gPatch5LevelPagingSupport)
> +global ASM_PFX(gPatch5LevelPagingNeeded)
> global ASM_PFX(gcSmiHandlerTemplate)
> global ASM_PFX(gcSmiHandlerSize)
>
> @@ -127,7 +127,7 @@ ASM_PFX(gPatchSmiCr3):
> mov eax, 0x668 ; as cr4.PGE is not set here, refresh cr3
>
> mov cl, strict byte 0 ; source operand will be patched
> -ASM_PFX(gPatch5LevelPagingSupport):
> +ASM_PFX(gPatch5LevelPagingNeeded):
> cmp cl, 0
> je SkipEnable5LevelPaging
> ;
> --
> 2.21.0.windows.1
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2019-09-09 0:51 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2019-09-05 22:19 [PATCH] UefiCpuPkg/PiSmmCpu: Enable 5L paging only when phy addr line > 48 Ni, Ray
2019-09-09 0:51 ` Dong, Eric
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox