From: "Ni, Ray" <ray.ni@intel.com>
To: devel@edk2.groups.io
Cc: Eric Dong <eric.dong@intel.com>
Subject: [PATCH 01/10] UefiCpuPkg: Create CpuPageTableLib for manipulating X86 paging structs
Date: Mon, 18 Jul 2022 21:18:22 +0800
Message-ID: <20220718131831.660-2-ray.ni@intel.com>
In-Reply-To: <20220718131831.660-1-ray.ni@intel.com>
The lib includes two APIs:
* PageTableMap
It creates or updates the mapping from linear addresses (LA) to
physical addresses (PA).
The implementation currently supports only the paging structures used
in 64-bit mode; PAE paging structure support will be added in the
future.
* PageTableParse
It parses the page table and returns the mapping relations in an
array of IA32_MAP_ENTRY.
The library passed several stress tests. The test code will be
upstreamed in separate patches following the edk2 Unit Test framework.
Signed-off-by: Ray Ni <ray.ni@intel.com>
Cc: Eric Dong <eric.dong@intel.com>
---
UefiCpuPkg/Include/Library/CpuPageTableLib.h | 129 +++++
.../Library/CpuPageTableLib/CpuPageTable.h | 204 +++++++
.../CpuPageTableLib/CpuPageTableLib.inf | 35 ++
.../Library/CpuPageTableLib/CpuPageTableMap.c | 543 ++++++++++++++++++
.../CpuPageTableLib/CpuPageTableParse.c | 330 +++++++++++
UefiCpuPkg/UefiCpuPkg.dec | 3 +
UefiCpuPkg/UefiCpuPkg.dsc | 4 +-
7 files changed, 1247 insertions(+), 1 deletion(-)
create mode 100644 UefiCpuPkg/Include/Library/CpuPageTableLib.h
create mode 100644 UefiCpuPkg/Library/CpuPageTableLib/CpuPageTable.h
create mode 100644 UefiCpuPkg/Library/CpuPageTableLib/CpuPageTableLib.inf
create mode 100644 UefiCpuPkg/Library/CpuPageTableLib/CpuPageTableMap.c
create mode 100644 UefiCpuPkg/Library/CpuPageTableLib/CpuPageTableParse.c
diff --git a/UefiCpuPkg/Include/Library/CpuPageTableLib.h b/UefiCpuPkg/Include/Library/CpuPageTableLib.h
new file mode 100644
index 0000000000..2dc9b7d18e
--- /dev/null
+++ b/UefiCpuPkg/Include/Library/CpuPageTableLib.h
@@ -0,0 +1,129 @@
+/** @file
+ Public include file for the CpuPageTableLib library.
+
+ Copyright (c) 2022, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef PAGE_TABLE_LIB_H_
+#define PAGE_TABLE_LIB_H_
+
+typedef union {
+ struct {
+ UINT64 Present : 1; // 0 = Not present in memory, 1 = Present in memory
+ UINT64 ReadWrite : 1; // 0 = Read-Only, 1= Read/Write
+ UINT64 UserSupervisor : 1; // 0 = Supervisor, 1=User
+ UINT64 WriteThrough : 1; // 0 = Write-Back caching, 1=Write-Through caching
+ UINT64 CacheDisabled : 1; // 0 = Cached, 1=Non-Cached
+ UINT64 Accessed : 1; // 0 = Not accessed, 1 = Accessed (set by CPU)
+ UINT64 Dirty : 1; // 0 = Not dirty, 1 = Dirty (set by CPU)
+ UINT64 Pat : 1; // PAT
+
+ UINT64 Global : 1; // 0 = Not global, 1 = Global (if CR4.PGE = 1)
+ UINT64 Reserved1 : 3; // Ignored
+
+ UINT64 PageTableBaseAddress : 40; // Page Table Base Address
+ UINT64 Reserved2 : 7; // Ignored
+ UINT64 ProtectionKey : 4; // Protection key
+ UINT64 Nx : 1; // No Execute bit
+ } Bits;
+ UINT64 Uint64;
+} IA32_MAP_ATTRIBUTE;
+
+#define IA32_MAP_ATTRIBUTE_PAGE_TABLE_BASE_ADDRESS_MASK 0xFFFFFFFFFF000ull
+#define IA32_MAP_ATTRIBUTE_PAGE_TABLE_BASE_ADDRESS(pa) ((pa)->Uint64 & IA32_MAP_ATTRIBUTE_PAGE_TABLE_BASE_ADDRESS_MASK)
+#define IA32_MAP_ATTRIBUTE_ATTRIBUTES(pa) ((pa)->Uint64 & ~IA32_MAP_ATTRIBUTE_PAGE_TABLE_BASE_ADDRESS_MASK)
+
+//
+// The enum below follows "4.1.1 Four Paging Modes" in Chapter 4 Paging of SDM Volume 3.
+// 1-GByte pages are only supported by 4-level and 5-level paging.
+//
+typedef enum {
+ Paging32bit,
+
+ //
+ // High byte in paging mode indicates the max levels of the page table.
+ // Low byte in paging mode indicates the max level that can be a leaf entry.
+ //
+ PagingPae = 0x0302,
+
+ Paging4Level = 0x0402,
+ Paging4Level1GB = 0x0403,
+
+ Paging5Level = 0x0502,
+ Paging5Level1GB = 0x0503,
+
+ PagingModeMax
+} PAGING_MODE;
+
+/**
+ Create or update page table to map [LinearAddress, LinearAddress + Length) with specified attribute.
+
+ @param[in, out] PageTable The pointer to the page table to update, or pointer to NULL if a new page table is to be created.
+ @param[in] PagingMode The paging mode.
+ @param[in] Buffer The free buffer to be used for page table creation/updating.
+ @param[in, out] BufferSize The buffer size.
+ On return, the remaining buffer size.
+ The free buffer is used from the end so the caller can supply the same Buffer pointer with an updated
+ BufferSize in the second call to this API.
+ @param[in] LinearAddress The start of the linear address range.
+ @param[in] Length The length of the linear address range.
+ @param[in] Attribute The attribute of the linear address range.
+ All non-reserved fields in IA32_MAP_ATTRIBUTE can be set in the page table.
+ Page table entries that map the linear address range are reset to 0 before being set to the new attribute
+ when a new physical base address is set.
+ @param[in] Mask The mask used for attribute. The corresponding field in Attribute is ignored if that in Mask is 0.
+
+ @retval RETURN_UNSUPPORTED PagingMode is not supported.
+ @retval RETURN_INVALID_PARAMETER PageTable, BufferSize, Attribute or Mask is NULL.
+ @retval RETURN_INVALID_PARAMETER *BufferSize is not a multiple of 4KB.
+ @retval RETURN_BUFFER_TOO_SMALL The buffer is too small for page table creation/updating.
+ BufferSize is updated to indicate the expected buffer size.
+ Caller may still get RETURN_BUFFER_TOO_SMALL with the new BufferSize.
+ @retval RETURN_SUCCESS PageTable is created/updated successfully.
+**/
+RETURN_STATUS
+EFIAPI
+PageTableMap (
+ IN OUT UINTN *PageTable OPTIONAL,
+ IN PAGING_MODE PagingMode,
+ IN VOID *Buffer,
+ IN OUT UINTN *BufferSize,
+ IN UINT64 LinearAddress,
+ IN UINT64 Length,
+ IN IA32_MAP_ATTRIBUTE *Attribute,
+ IN IA32_MAP_ATTRIBUTE *Mask
+ );
+
+typedef struct {
+ UINT64 LinearAddress;
+ UINT64 Length;
+ IA32_MAP_ATTRIBUTE Attribute;
+} IA32_MAP_ENTRY;
+
+/**
+ Parse page table.
+
+ @param[in] PageTable Pointer to the page table.
+ @param[in] PagingMode The paging mode.
+ @param[out] Map Return an array that describes multiple linear address ranges.
+ @param[in, out] MapCount On input, the maximum number of entries that Map can hold.
+ On output, the number of entries in Map.
+
+ @retval RETURN_UNSUPPORTED PagingMode is not 4-level or 5-level paging.
+ @retval RETURN_INVALID_PARAMETER MapCount is NULL.
+ @retval RETURN_INVALID_PARAMETER *MapCount is not 0 but Map is NULL.
+ @retval RETURN_BUFFER_TOO_SMALL *MapCount is too small.
+ @retval RETURN_SUCCESS Page table is parsed successfully.
+**/
+RETURN_STATUS
+EFIAPI
+PageTableParse (
+ IN UINTN PageTable,
+ IN PAGING_MODE PagingMode,
+ OUT IA32_MAP_ENTRY *Map,
+ IN OUT UINTN *MapCount
+ );
+
+#endif
diff --git a/UefiCpuPkg/Library/CpuPageTableLib/CpuPageTable.h b/UefiCpuPkg/Library/CpuPageTableLib/CpuPageTable.h
new file mode 100644
index 0000000000..c041ea3f56
--- /dev/null
+++ b/UefiCpuPkg/Library/CpuPageTableLib/CpuPageTable.h
@@ -0,0 +1,204 @@
+/** @file
+ Internal header for CpuPageTableLib.
+
+ Copyright (c) 2022, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef CPU_PAGE_TABLE_H_
+#define CPU_PAGE_TABLE_H_
+
+#include <Base.h>
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/DebugLib.h>
+#include <Library/CpuPageTableLib.h>
+
+#define IA32_PE_BASE_ADDRESS_MASK_40 0xFFFFFFFFFF000ull
+#define IA32_PE_BASE_ADDRESS_MASK_39 0xFFFFFFFFFE000ull
+
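+//
+// REGION_LENGTH(l) is the linear-address range covered by one paging entry at level l:
+// 4KB (l = 1), 2MB (l = 2), 1GB (l = 3), 512GB (l = 4), 256TB (l = 5), i.e. 1 << (l * 9 + 3).
+//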
+#define REGION_LENGTH(l) LShiftU64 (1, (l) * 9 + 3)
+
+typedef struct {
+ UINT64 Present : 1; // 0 = Not present in memory, 1 = Present in memory
+} IA32_PAGE_COMMON_ENTRY;
+
+///
+/// Format of a non-leaf entry that references a page table entry
+///
+typedef union {
+ struct {
+ UINT64 Present : 1; // 0 = Not present in memory, 1 = Present in memory
+ UINT64 ReadWrite : 1; // 0 = Read-Only, 1= Read/Write
+ UINT64 UserSupervisor : 1; // 0 = Supervisor, 1=User
+ UINT64 WriteThrough : 1; // 0 = Write-Back caching, 1=Write-Through caching
+ UINT64 CacheDisabled : 1; // 0 = Cached, 1=Non-Cached
+ UINT64 Accessed : 1; // 0 = Not accessed, 1 = Accessed (set by CPU)
+ UINT64 Available0 : 1; // Ignored
+ UINT64 MustBeZero : 1; // Must Be Zero
+
+ UINT64 Available2 : 4; // Ignored
+
+ UINT64 PageTableBaseAddress : 40; // Page Table Base Address
+ UINT64 Available3 : 11; // Ignored
+ UINT64 Nx : 1; // No Execute bit
+ } Bits;
+ UINT64 Uint64;
+} IA32_PAGE_NON_LEAF_ENTRY;
+
+#define IA32_PNLE_PAGE_TABLE_BASE_ADDRESS(pa) ((pa)->Uint64 & IA32_PE_BASE_ADDRESS_MASK_40)
+
+///
+/// Format of a PML5 Entry (PML5E) that References a PML4 Table
+///
+typedef IA32_PAGE_NON_LEAF_ENTRY IA32_PML5E;
+
+///
+/// Format of a PML4 Entry (PML4E) that References a Page-Directory-Pointer Table
+///
+typedef IA32_PAGE_NON_LEAF_ENTRY IA32_PML4E;
+
+///
+/// Format of a Page-Directory-Pointer-Table Entry (PDPTE) that References a Page Directory
+///
+typedef IA32_PAGE_NON_LEAF_ENTRY IA32_PDPTE;
+
+///
+/// Format of a Page-Directory Entry that References a Page Table
+///
+typedef IA32_PAGE_NON_LEAF_ENTRY IA32_PDE;
+
+///
+/// Format of a leaf entry that Maps a 1-Gbyte or 2-MByte Page
+///
+typedef union {
+ struct {
+ UINT64 Present : 1; // 0 = Not present in memory, 1 = Present in memory
+ UINT64 ReadWrite : 1; // 0 = Read-Only, 1= Read/Write
+ UINT64 UserSupervisor : 1; // 0 = Supervisor, 1=User
+ UINT64 WriteThrough : 1; // 0 = Write-Back caching, 1=Write-Through caching
+ UINT64 CacheDisabled : 1; // 0 = Cached, 1=Non-Cached
+ UINT64 Accessed : 1; // 0 = Not accessed, 1 = Accessed (set by CPU)
+ UINT64 Dirty : 1; // 0 = Not dirty, 1 = Dirty (set by CPU)
+ UINT64 MustBeOne : 1; // Page Size. Must Be One
+
+ UINT64 Global : 1; // 0 = Not global, 1 = Global (if CR4.PGE = 1)
+ UINT64 Available1 : 3; // Ignored
+ UINT64 Pat : 1; // PAT
+
+ UINT64 PageTableBaseAddress : 39; // Page Table Base Address
+ UINT64 Available3 : 7; // Ignored
+ UINT64 ProtectionKey : 4; // Protection key
+ UINT64 Nx : 1; // No Execute bit
+ } Bits;
+ UINT64 Uint64;
+} IA32_PAGE_LEAF_ENTRY_BIG_PAGESIZE;
+#define IA32_PLEB_PAGE_TABLE_BASE_ADDRESS(pa) ((pa)->Uint64 & IA32_PE_BASE_ADDRESS_MASK_39)
+
+///
+/// Format of a Page-Directory Entry that Maps a 2-MByte Page
+///
+typedef IA32_PAGE_LEAF_ENTRY_BIG_PAGESIZE IA32_PDE_2M;
+
+///
+/// Format of a Page-Directory-Pointer-Table Entry (PDPTE) that Maps a 1-GByte Page
+///
+typedef IA32_PAGE_LEAF_ENTRY_BIG_PAGESIZE IA32_PDPTE_1G;
+
+///
+/// Format of a Page-Table Entry that Maps a 4-KByte Page
+///
+typedef union {
+ struct {
+ UINT64 Present : 1; // 0 = Not present in memory, 1 = Present in memory
+ UINT64 ReadWrite : 1; // 0 = Read-Only, 1= Read/Write
+ UINT64 UserSupervisor : 1; // 0 = Supervisor, 1=User
+ UINT64 WriteThrough : 1; // 0 = Write-Back caching, 1=Write-Through caching
+ UINT64 CacheDisabled : 1; // 0 = Cached, 1=Non-Cached
+ UINT64 Accessed : 1; // 0 = Not accessed, 1 = Accessed (set by CPU)
+ UINT64 Dirty : 1; // 0 = Not dirty, 1 = Dirty (set by CPU)
+ UINT64 Pat : 1; // PAT
+
+ UINT64 Global : 1; // 0 = Not global, 1 = Global (if CR4.PGE = 1)
+ UINT64 Available1 : 3; // Ignored
+
+ UINT64 PageTableBaseAddress : 40; // Page Table Base Address
+ UINT64 Available3 : 7; // Ignored
+ UINT64 ProtectionKey : 4; // Protection key
+ UINT64 Nx : 1; // No Execute bit
+ } Bits;
+ UINT64 Uint64;
+} IA32_PTE_4K;
+#define IA32_PTE4K_PAGE_TABLE_BASE_ADDRESS(pa) ((pa)->Uint64 & IA32_PE_BASE_ADDRESS_MASK_40)
+
+///
+/// Format of a Page-Directory-Pointer-Table Entry (PDPTE) that References a Page Directory (32bit PAE specific)
+///
+typedef union {
+ struct {
+ UINT64 Present : 1; // 0 = Not present in memory, 1 = Present in memory
+ UINT64 MustBeZero : 2; // Must Be Zero
+ UINT64 WriteThrough : 1; // 0 = Write-Back caching, 1=Write-Through caching
+ UINT64 CacheDisabled : 1; // 0 = Cached, 1=Non-Cached
+ UINT64 MustBeZero2 : 4; // Must Be Zero
+
+ UINT64 Available : 3; // Ignored
+
+ UINT64 PageTableBaseAddress : 40; // Page Table Base Address
+ UINT64 MustBeZero3 : 12; // Must Be Zero
+ } Bits;
+ UINT64 Uint64;
+} IA32_PDPTE_PAE;
+
+typedef union {
+ IA32_PAGE_NON_LEAF_ENTRY Pnle; // To access Pml5, Pml4, Pdpte and Pde.
+ IA32_PML5E Pml5;
+ IA32_PML4E Pml4;
+ IA32_PDPTE Pdpte;
+ IA32_PDE Pde;
+
+ IA32_PAGE_LEAF_ENTRY_BIG_PAGESIZE PleB; // to access Pdpte1G and Pde2M.
+ IA32_PDPTE_1G Pdpte1G;
+ IA32_PDE_2M Pde2M;
+
+ IA32_PTE_4K Pte4K;
+
+ IA32_PDPTE_PAE PdptePae;
+ IA32_PAGE_COMMON_ENTRY Pce; // To access all common bits in above entries.
+
+ UINT64 Uint64;
+ UINTN Uintn;
+} IA32_PAGING_ENTRY;
+
+/**
+ Return TRUE when the page table entry is a leaf entry that maps physical memory.
+ Return FALSE when the page table entry is a non-leaf entry that references the next-level page table.
+
+ @param[in] PagingEntry Pointer to the page table entry.
+ @param[in] Level Page level where the page table entry resides in.
+
+ @retval TRUE It's a leaf entry.
+ @retval FALSE It's a non-leaf entry.
+**/
+BOOLEAN
+IsPle (
+ IN IA32_PAGING_ENTRY *PagingEntry,
+ IN UINTN Level
+ );
+
+/**
+ Return the attribute of a 2M/1G page table entry.
+
+ @param[in] PleB Pointer to a 2M/1G page table entry.
+ @param[in] ParentMapAttribute Pointer to the parent attribute.
+
+ @return Attribute of the 2M/1G page table entry.
+**/
+UINT64
+PageTableLibGetPleBMapAttribute (
+ IN IA32_PAGE_LEAF_ENTRY_BIG_PAGESIZE *PleB,
+ IN IA32_MAP_ATTRIBUTE *ParentMapAttribute
+ );
+
+#endif
diff --git a/UefiCpuPkg/Library/CpuPageTableLib/CpuPageTableLib.inf b/UefiCpuPkg/Library/CpuPageTableLib/CpuPageTableLib.inf
new file mode 100644
index 0000000000..e4ead7441c
--- /dev/null
+++ b/UefiCpuPkg/Library/CpuPageTableLib/CpuPageTableLib.inf
@@ -0,0 +1,35 @@
+## @file
+# This library implements CpuPageTableLib, which is generic for IA32 family CPUs.
+#
+# Copyright (c) 2022, Intel Corporation. All rights reserved.<BR>
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+##
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = CpuPageTableLib
+ FILE_GUID = 524ed6a1-f661-451b-929b-b54d755c914a
+ MODULE_TYPE = BASE
+ VERSION_STRING = 1.0
+ LIBRARY_CLASS = CpuPageTableLib
+
+#
+# The following information is for reference only and not required by the build tools.
+#
+# VALID_ARCHITECTURES = IA32 X64
+#
+
+[Sources]
+ CpuPageTableMap.c
+ CpuPageTableParse.c
+ CpuPageTable.h
+
+[Packages]
+ MdePkg/MdePkg.dec
+ UefiCpuPkg/UefiCpuPkg.dec
+
+[LibraryClasses]
+ BaseLib
+ BaseMemoryLib
+ DebugLib
diff --git a/UefiCpuPkg/Library/CpuPageTableLib/CpuPageTableMap.c b/UefiCpuPkg/Library/CpuPageTableLib/CpuPageTableMap.c
new file mode 100644
index 0000000000..25e13a6f6f
--- /dev/null
+++ b/UefiCpuPkg/Library/CpuPageTableLib/CpuPageTableMap.c
@@ -0,0 +1,543 @@
+/** @file
+ This library implements CpuPageTableLib, which is generic for IA32 family CPUs.
+
+ Copyright (c) 2022, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "CpuPageTable.h"
+
+/**
+ Set the IA32_PTE_4K.
+
+ @param[in] Pte4K Pointer to IA32_PTE_4K.
+ @param[in] Offset The offset within the linear address range.
+ @param[in] Attribute The attribute of the linear address range.
+ All non-reserved fields in IA32_MAP_ATTRIBUTE can be set in the page table.
+ The page table entry is reset to 0 before being set to the new attribute when a new physical base address is set.
+ @param[in] Mask The mask used for attribute. The corresponding field in Attribute is ignored if that in Mask is 0.
+**/
+VOID
+PageTableLibSetPte4K (
+ IN IA32_PTE_4K *Pte4K,
+ IN UINT64 Offset,
+ IN IA32_MAP_ATTRIBUTE *Attribute,
+ IN IA32_MAP_ATTRIBUTE *Mask
+ )
+{
+ if (Mask->Bits.PageTableBaseAddress) {
+ //
+ // Reset all attributes when the physical address is changed.
+ //
+ Pte4K->Uint64 = IA32_MAP_ATTRIBUTE_PAGE_TABLE_BASE_ADDRESS (Attribute) + Offset;
+ }
+
+ if (Mask->Bits.Present) {
+ Pte4K->Bits.Present = Attribute->Bits.Present;
+ }
+
+ if (Mask->Bits.ReadWrite) {
+ Pte4K->Bits.ReadWrite = Attribute->Bits.ReadWrite;
+ }
+
+ if (Mask->Bits.UserSupervisor) {
+ Pte4K->Bits.UserSupervisor = Attribute->Bits.UserSupervisor;
+ }
+
+ if (Mask->Bits.WriteThrough) {
+ Pte4K->Bits.WriteThrough = Attribute->Bits.WriteThrough;
+ }
+
+ if (Mask->Bits.CacheDisabled) {
+ Pte4K->Bits.CacheDisabled = Attribute->Bits.CacheDisabled;
+ }
+
+ if (Mask->Bits.Accessed) {
+ Pte4K->Bits.Accessed = Attribute->Bits.Accessed;
+ }
+
+ if (Mask->Bits.Dirty) {
+ Pte4K->Bits.Dirty = Attribute->Bits.Dirty;
+ }
+
+ if (Mask->Bits.Pat) {
+ Pte4K->Bits.Pat = Attribute->Bits.Pat;
+ }
+
+ if (Mask->Bits.Global) {
+ Pte4K->Bits.Global = Attribute->Bits.Global;
+ }
+
+ if (Mask->Bits.ProtectionKey) {
+ Pte4K->Bits.ProtectionKey = Attribute->Bits.ProtectionKey;
+ }
+
+ if (Mask->Bits.Nx) {
+ Pte4K->Bits.Nx = Attribute->Bits.Nx;
+ }
+}
+
+/**
+ Set the IA32_PDPTE_1G or IA32_PDE_2M.
+
+ @param[in] PleB Pointer to PDPTE_1G or PDE_2M. Both share the same structure definition.
+ @param[in] Offset The offset within the linear address range.
+ @param[in] Attribute The attribute of the linear address range.
+ All non-reserved fields in IA32_MAP_ATTRIBUTE can be set in the page table.
+ The page table entry is reset to 0 before being set to the new attribute when a new physical base address is set.
+ @param[in] Mask The mask used for attribute. The corresponding field in Attribute is ignored if that in Mask is 0.
+**/
+VOID
+PageTableLibSetPleB (
+ IN IA32_PAGE_LEAF_ENTRY_BIG_PAGESIZE *PleB,
+ IN UINT64 Offset,
+ IN IA32_MAP_ATTRIBUTE *Attribute,
+ IN IA32_MAP_ATTRIBUTE *Mask
+ )
+{
+ if (Mask->Bits.PageTableBaseAddress) {
+ //
+ // Reset all attributes when the physical address is changed.
+ //
+ PleB->Uint64 = IA32_MAP_ATTRIBUTE_PAGE_TABLE_BASE_ADDRESS (Attribute) + Offset;
+ }
+
+ PleB->Bits.MustBeOne = 1;
+
+ if (Mask->Bits.Present) {
+ PleB->Bits.Present = Attribute->Bits.Present;
+ }
+
+ if (Mask->Bits.ReadWrite) {
+ PleB->Bits.ReadWrite = Attribute->Bits.ReadWrite;
+ }
+
+ if (Mask->Bits.UserSupervisor) {
+ PleB->Bits.UserSupervisor = Attribute->Bits.UserSupervisor;
+ }
+
+ if (Mask->Bits.WriteThrough) {
+ PleB->Bits.WriteThrough = Attribute->Bits.WriteThrough;
+ }
+
+ if (Mask->Bits.CacheDisabled) {
+ PleB->Bits.CacheDisabled = Attribute->Bits.CacheDisabled;
+ }
+
+ if (Mask->Bits.Accessed) {
+ PleB->Bits.Accessed = Attribute->Bits.Accessed;
+ }
+
+ if (Mask->Bits.Dirty) {
+ PleB->Bits.Dirty = Attribute->Bits.Dirty;
+ }
+
+ if (Mask->Bits.Pat) {
+ PleB->Bits.Pat = Attribute->Bits.Pat;
+ }
+
+ if (Mask->Bits.Global) {
+ PleB->Bits.Global = Attribute->Bits.Global;
+ }
+
+ if (Mask->Bits.ProtectionKey) {
+ PleB->Bits.ProtectionKey = Attribute->Bits.ProtectionKey;
+ }
+
+ if (Mask->Bits.Nx) {
+ PleB->Bits.Nx = Attribute->Bits.Nx;
+ }
+}
+
+/**
+ Set the IA32_PDPTE_1G, IA32_PDE_2M or IA32_PTE_4K.
+
+ @param[in] Level 3, 2 or 1.
+ @param[in] Ple Pointer to PDPTE_1G, PDE_2M or IA32_PTE_4K, depending on the Level.
+ @param[in] Offset The offset within the linear address range.
+ @param[in] Attribute The attribute of the linear address range.
+ All non-reserved fields in IA32_MAP_ATTRIBUTE can be set in the page table.
+ The page table entry is reset to 0 before being set to the new attribute when a new physical base address is set.
+ @param[in] Mask The mask used for attribute. The corresponding field in Attribute is ignored if that in Mask is 0.
+**/
+VOID
+PageTableLibSetPle (
+ IN UINTN Level,
+ IN IA32_PAGING_ENTRY *Ple,
+ IN UINT64 Offset,
+ IN IA32_MAP_ATTRIBUTE *Attribute,
+ IN IA32_MAP_ATTRIBUTE *Mask
+ )
+{
+ if (Level == 1) {
+ PageTableLibSetPte4K (&Ple->Pte4K, Offset, Attribute, Mask);
+ } else {
+ ASSERT (Level == 2 || Level == 3);
+ PageTableLibSetPleB (&Ple->PleB, Offset, Attribute, Mask);
+ }
+}
+
+/**
+ Set the IA32_PML5, IA32_PML4, IA32_PDPTE or IA32_PDE.
+
+ @param[in] Pnle Pointer to IA32_PML5, IA32_PML4, IA32_PDPTE or IA32_PDE. All share the same structure definition.
+ @param[in] Attribute The attribute of the page directory referenced by the non-leaf entry.
+**/
+VOID
+PageTableLibSetPnle (
+ IN IA32_PAGE_NON_LEAF_ENTRY *Pnle,
+ IN IA32_MAP_ATTRIBUTE *Attribute
+ )
+{
+ Pnle->Bits.Present = Attribute->Bits.Present;
+ Pnle->Bits.ReadWrite = Attribute->Bits.ReadWrite;
+ Pnle->Bits.UserSupervisor = Attribute->Bits.UserSupervisor;
+ Pnle->Bits.Nx = Attribute->Bits.Nx;
+ Pnle->Bits.Accessed = 0;
+
+ //
+ // Set the attributes (WT, CD, A) to 0.
+ // WT and CD determine the memory type used to access the 4K page directory referenced by this entry.
+ // So, it implicitly requires PAT[0] to be Write Back.
+ // Create a new parameter if caller requires to use a different memory type for accessing page directories.
+ //
+ Pnle->Bits.WriteThrough = 0;
+ Pnle->Bits.CacheDisabled = 0;
+}
+
+/**
+ Update page table to map [LinearAddress, LinearAddress + Length) with specified attribute in the specified level.
+
+ @param[in] ParentPagingEntry The pointer to the page table entry to update.
+ @param[in] Modify TRUE to modify the page table. FALSE to only calculate the required buffer size: Buffer is not used and *BufferSize is decreased by the required buffer size.
+ @param[in] Buffer The free buffer to be used for page table creation/updating.
+ When Modify is TRUE, it's used from the end.
+ When Modify is FALSE, it's ignored.
+ @param[in, out] BufferSize The available buffer size.
+ Return the remaining buffer size.
+ @param[in] Level Page table level. Could be 5, 4, 3, 2, or 1.
+ @param[in] MaxLeafLevel Maximum level that can be a leaf entry. Could be 1, 2 or 3 (if Page 1G is supported).
+ @param[in] LinearAddress The start of the linear address range.
+ @param[in] Length The length of the linear address range.
+ @param[in] Offset The offset within the linear address range.
+ @param[in] Attribute The attribute of the linear address range.
+ All non-reserved fields in IA32_MAP_ATTRIBUTE can be set in the page table.
+ Page table entries that map the linear address range are reset to 0 before being set to the new attribute
+ when a new physical base address is set.
+ @param[in] Mask The mask used for attribute. The corresponding field in Attribute is ignored if that in Mask is 0.
+
+ @retval RETURN_SUCCESS PageTable is created/updated successfully.
+**/
+RETURN_STATUS
+PageTableLibMapInLevel (
+ IN IA32_PAGING_ENTRY *ParentPagingEntry,
+ IN BOOLEAN Modify,
+ IN VOID *Buffer,
+ IN OUT INTN *BufferSize,
+ IN UINTN Level,
+ IN UINTN MaxLeafLevel,
+ IN UINT64 LinearAddress,
+ IN UINT64 Length,
+ IN UINT64 Offset,
+ IN IA32_MAP_ATTRIBUTE *Attribute,
+ IN IA32_MAP_ATTRIBUTE *Mask
+ )
+{
+ RETURN_STATUS Status;
+ UINTN BitStart;
+ UINTN Index;
+ IA32_PAGING_ENTRY *PagingEntry;
+ UINT64 RegionLength;
+ UINT64 SubLength;
+ UINT64 SubOffset;
+ UINT64 RegionMask;
+ UINT64 RegionStart;
+ IA32_MAP_ATTRIBUTE AllOneMask;
+ IA32_MAP_ATTRIBUTE PleBAttribute;
+ IA32_MAP_ATTRIBUTE NopAttribute;
+ BOOLEAN CreateNew;
+ IA32_PAGING_ENTRY OneOfPagingEntry;
+
+ ASSERT (Level != 0);
+ ASSERT ((Attribute != NULL) && (Mask != NULL));
+
+ CreateNew = FALSE;
+ AllOneMask.Uint64 = ~0ull;
+
+ NopAttribute.Uint64 = 0;
+ NopAttribute.Bits.Present = 1;
+ NopAttribute.Bits.ReadWrite = 1;
+ NopAttribute.Bits.UserSupervisor = 1;
+
+ //
+ // ParentPagingEntry is ONLY dereferenced to check the Present and MustBeOne bits
+ // when Modify is FALSE.
+ //
+
+ if (ParentPagingEntry->Pce.Present == 0) {
+ //
+ // The parent entry is CR3 or PML5E/PML4E/PDPTE/PDE.
+ // It does NOT point to an existing page directory.
+ //
+ ASSERT (Buffer == NULL || *BufferSize >= SIZE_4KB);
+ CreateNew = TRUE;
+ *BufferSize -= SIZE_4KB;
+
+ if (Modify) {
+ ParentPagingEntry->Uintn = (UINTN)Buffer + *BufferSize;
+ ZeroMem ((VOID *)ParentPagingEntry->Uintn, SIZE_4KB);
+ //
+ // Set default attribute bits for PML5E/PML4E/PDPTE/PDE.
+ //
+ PageTableLibSetPnle (&ParentPagingEntry->Pnle, &NopAttribute);
+ } else {
+ //
+ // Just make sure Present and MustBeZero (PageSize) bits are accurate.
+ //
+ OneOfPagingEntry.Pnle.Uint64 = 0;
+ }
+ } else if (IsPle (ParentPagingEntry, Level + 1)) {
+ //
+ // The parent entry is a PDPTE_1G or PDE_2M. Split to 2M or 4K pages.
+ // Note: it's impossible the parent entry is a PTE_4K.
+ //
+ //
+ // Use NOP attributes as the attribute of the grand-parents because the CPU considers
+ // the actual attributes of the grand-parents when determining the memory type.
+ //
+ PleBAttribute.Uint64 = PageTableLibGetPleBMapAttribute (&ParentPagingEntry->PleB, &NopAttribute);
+ if ((IA32_MAP_ATTRIBUTE_ATTRIBUTES (&PleBAttribute) & IA32_MAP_ATTRIBUTE_ATTRIBUTES (Mask))
+ == IA32_MAP_ATTRIBUTE_ATTRIBUTES (Attribute))
+ {
+ //
+ // This function is called when the memory length is less than the region length of the parent level.
+ // No need to split the page when the attributes equal.
+ //
+ return RETURN_SUCCESS;
+ }
+
+ ASSERT (Buffer == NULL || *BufferSize >= SIZE_4KB);
+ CreateNew = TRUE;
+ *BufferSize -= SIZE_4KB;
+ PageTableLibSetPle (Level, &OneOfPagingEntry, 0, &PleBAttribute, &AllOneMask);
+ if (Modify) {
+ //
+ // Create 512 child-level entries that map to 2M/4K.
+ //
+ ParentPagingEntry->Uintn = (UINTN)Buffer + *BufferSize;
+ ZeroMem ((VOID *)ParentPagingEntry->Uintn, SIZE_4KB);
+
+ //
+ // Set NOP attributes
+ // Note: Should NOT inherit the attributes from the original entry because a zero RW bit
+ // will make the entire region read-only even if the child entries set the RW bit.
+ //
+ PageTableLibSetPnle (&ParentPagingEntry->Pnle, &NopAttribute);
+
+ RegionLength = REGION_LENGTH (Level);
+ PagingEntry = (IA32_PAGING_ENTRY *)(UINTN)IA32_PNLE_PAGE_TABLE_BASE_ADDRESS (&ParentPagingEntry->Pnle);
+ for (SubOffset = 0, Index = 0; Index < 512; Index++) {
+ PagingEntry[Index].Uint64 = OneOfPagingEntry.Uint64 + SubOffset;
+ SubOffset += RegionLength;
+ }
+ }
+ }
+
+ //
+ // RegionLength: 256T (1 << 48), 512G (1 << 39), 1G (1 << 30), 2M (1 << 21) or 4K (1 << 12).
+ // RegionStart: points to the linear address that's aligned on RegionLength and lower than (LinearAddress + Offset).
+ //
+ BitStart = 12 + (Level - 1) * 9;
+ Index = BitFieldRead64 (LinearAddress + Offset, BitStart, BitStart + 9 - 1);
+ RegionLength = LShiftU64 (1, BitStart);
+ RegionMask = RegionLength - 1;
+ RegionStart = (LinearAddress + Offset) & ~RegionMask;
+
+ //
+ // Apply the attribute.
+ //
+ PagingEntry = (IA32_PAGING_ENTRY *)(UINTN)IA32_PNLE_PAGE_TABLE_BASE_ADDRESS (&ParentPagingEntry->Pnle);
+ while (Offset < Length && Index < 512) {
+ SubLength = MIN (Length - Offset, RegionStart + RegionLength - (LinearAddress + Offset));
+ if ((Level <= MaxLeafLevel) && (LinearAddress + Offset == RegionStart) && (SubLength == RegionLength)) {
+ //
+ // Create one entry mapping the entire region (1G, 2M or 4K).
+ //
+ if (Modify) {
+ PageTableLibSetPle (Level, &PagingEntry[Index], Offset, Attribute, Mask);
+ }
+ } else {
+ //
+ // Recursively call to create page table.
+ // There are 3 cases:
+ // a. Level cannot be a leaf entry which points to physical memory.
+ // b. Level can be a leaf entry but (LinearAddress + Offset) is NOT aligned on RegionStart.
+ // c. Level can be a leaf entry and (LinearAddress + Offset) is aligned on RegionStart,
+ // but the remaining length is SMALLER than RegionLength.
+ //
+ Status = PageTableLibMapInLevel (
+ (!Modify && CreateNew) ? &OneOfPagingEntry : &PagingEntry[Index],
+ Modify,
+ Buffer,
+ BufferSize,
+ Level - 1,
+ MaxLeafLevel,
+ LinearAddress,
+ Length,
+ Offset,
+ Attribute,
+ Mask
+ );
+ if (RETURN_ERROR (Status)) {
+ return Status;
+ }
+ }
+
+ Offset += SubLength;
+ RegionStart += RegionLength;
+ Index++;
+ }
+
+ return RETURN_SUCCESS;
+}
+
+/**
+ Create or update page table to map [LinearAddress, LinearAddress + Length) with specified attribute.
+
+ @param[in, out] PageTable The pointer to the page table to update, or pointer to NULL if a new page table is to be created.
+ @param[in] PagingMode The paging mode.
+ @param[in] Buffer The free buffer to be used for page table creation/updating.
+ @param[in, out] BufferSize The buffer size.
+ On return, the remaining buffer size.
+ The free buffer is used from the end so the caller can supply the same Buffer pointer with an updated
+ BufferSize in the second call to this API.
+ @param[in] LinearAddress The start of the linear address range.
+ @param[in] Length The length of the linear address range.
+ @param[in] Attribute The attribute of the linear address range.
+ All non-reserved fields in IA32_MAP_ATTRIBUTE can be set in the page table.
+ Page table entries that map the linear address range are reset to 0 before being set to the new attribute
+ when a new physical base address is set.
+ @param[in] Mask The mask used for attribute. The corresponding field in Attribute is ignored if that in Mask is 0.
+
+ @retval RETURN_UNSUPPORTED PagingMode is not supported.
+ @retval RETURN_INVALID_PARAMETER PageTable, BufferSize, Attribute or Mask is NULL.
+ @retval RETURN_INVALID_PARAMETER *BufferSize is not a multiple of 4KB.
+ @retval RETURN_BUFFER_TOO_SMALL The buffer is too small for page table creation/updating.
+ BufferSize is updated to indicate the expected buffer size.
+ Caller may still get RETURN_BUFFER_TOO_SMALL with the new BufferSize.
+ @retval RETURN_SUCCESS PageTable is created/updated successfully.
+**/
+RETURN_STATUS
+EFIAPI
+PageTableMap (
+ IN OUT UINTN *PageTable OPTIONAL,
+ IN PAGING_MODE PagingMode,
+ IN VOID *Buffer,
+ IN OUT UINTN *BufferSize,
+ IN UINT64 LinearAddress,
+ IN UINT64 Length,
+ IN IA32_MAP_ATTRIBUTE *Attribute,
+ IN IA32_MAP_ATTRIBUTE *Mask
+ )
+{
+ RETURN_STATUS Status;
+ IA32_PAGING_ENTRY TopPagingEntry;
+ INTN RequiredSize;
+ UINT64 MaxLinearAddress;
+ UINTN MaxLevel;
+ UINTN MaxLeafLevel;
+
+ if ((PagingMode == Paging32bit) || (PagingMode == PagingPae) || (PagingMode >= PagingModeMax)) {
+ //
+ // 32bit paging is never supported.
+ // PAE paging will be supported later.
+ //
+ return RETURN_UNSUPPORTED;
+ }
+
+ if ((PageTable == NULL) || (BufferSize == NULL) || (Attribute == NULL) || (Mask == NULL)) {
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ if (*BufferSize % SIZE_4KB != 0) {
+ //
+ // BufferSize should be a multiple of 4KB.
+ //
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ if ((*BufferSize != 0) && (Buffer == NULL)) {
+ return RETURN_INVALID_PARAMETER;
+ }
+
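+ //
+ // The low byte of PagingMode is the max level that can be a leaf entry;
+ // the high byte is the number of page table levels (see PAGING_MODE).
+ //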
+ MaxLeafLevel = (UINT8)PagingMode;
+ MaxLevel = (UINT8)(PagingMode >> 8);
+ MaxLinearAddress = LShiftU64 (1, 12 + MaxLevel * 9);
+
+ if ((LinearAddress > MaxLinearAddress) || (Length > MaxLinearAddress - LinearAddress)) {
+ //
+ // Maximum linear address is (1 << 48) or (1 << 57)
+ //
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ TopPagingEntry.Uintn = *PageTable;
+ if (TopPagingEntry.Uintn != 0) {
+ TopPagingEntry.Pce.Present = 1;
+ }
+
+ //
+ // Query the required buffer size without modifying the page table.
+ //
+ RequiredSize = 0;
+ Status = PageTableLibMapInLevel (
+ &TopPagingEntry,
+ FALSE,
+ NULL,
+ &RequiredSize,
+ MaxLevel,
+ MaxLeafLevel,
+ LinearAddress,
+ Length,
+ 0,
+ Attribute,
+ Mask
+ );
+ if (RETURN_ERROR (Status)) {
+ return Status;
+ }
+
+ RequiredSize = -RequiredSize;
+
+ if ((UINTN)RequiredSize > *BufferSize) {
+ *BufferSize = RequiredSize;
+ return RETURN_BUFFER_TOO_SMALL;
+ }
+
+ if ((RequiredSize != 0) && (Buffer == NULL)) {
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ //
+ // Update the page table when the supplied buffer is sufficient.
+ //
+ Status = PageTableLibMapInLevel (
+ &TopPagingEntry,
+ TRUE,
+ Buffer,
+ BufferSize,
+ MaxLevel,
+ MaxLeafLevel,
+ LinearAddress,
+ Length,
+ 0,
+ Attribute,
+ Mask
+ );
+ if (!RETURN_ERROR (Status)) {
+ *PageTable = (UINTN)(TopPagingEntry.Uintn & IA32_PE_BASE_ADDRESS_MASK_40);
+ }
+
+ return Status;
+}
diff --git a/UefiCpuPkg/Library/CpuPageTableLib/CpuPageTableParse.c b/UefiCpuPkg/Library/CpuPageTableLib/CpuPageTableParse.c
new file mode 100644
index 0000000000..e66961e122
--- /dev/null
+++ b/UefiCpuPkg/Library/CpuPageTableLib/CpuPageTableParse.c
@@ -0,0 +1,330 @@
+/** @file
+ This library implements CpuPageTableLib, which is generic for IA32 family CPUs.
+
+ Copyright (c) 2022, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "CpuPageTable.h"
+
+/**
+ Return the attribute of a 2M/1G page table entry.
+
+ @param[in] PleB Pointer to a 2M/1G page table entry.
+ @param[in] ParentMapAttribute Pointer to the parent attribute.
+
+ @return Attribute of the 2M/1G page table entry.
+**/
+UINT64
+PageTableLibGetPleBMapAttribute (
+ IN IA32_PAGE_LEAF_ENTRY_BIG_PAGESIZE *PleB,
+ IN IA32_MAP_ATTRIBUTE *ParentMapAttribute
+ )
+{
+ IA32_MAP_ATTRIBUTE MapAttribute;
+
+ //
+ // PageTableBaseAddress cannot be assigned field-by-field
+ // because its bit position differs between IA32_MAP_ATTRIBUTE and IA32_PAGE_LEAF_ENTRY_BIG_PAGESIZE.
+ //
+ MapAttribute.Uint64 = IA32_PLEB_PAGE_TABLE_BASE_ADDRESS (PleB);
+
+ MapAttribute.Bits.Present = ParentMapAttribute->Bits.Present & PleB->Bits.Present;
+ MapAttribute.Bits.ReadWrite = ParentMapAttribute->Bits.ReadWrite & PleB->Bits.ReadWrite;
+ MapAttribute.Bits.UserSupervisor = ParentMapAttribute->Bits.UserSupervisor & PleB->Bits.UserSupervisor;
+ MapAttribute.Bits.Nx = ParentMapAttribute->Bits.Nx | PleB->Bits.Nx;
+ MapAttribute.Bits.WriteThrough = PleB->Bits.WriteThrough;
+ MapAttribute.Bits.CacheDisabled = PleB->Bits.CacheDisabled;
+ MapAttribute.Bits.Accessed = PleB->Bits.Accessed;
+
+ MapAttribute.Bits.Pat = PleB->Bits.Pat;
+ MapAttribute.Bits.Dirty = PleB->Bits.Dirty;
+ MapAttribute.Bits.Global = PleB->Bits.Global;
+ MapAttribute.Bits.ProtectionKey = PleB->Bits.ProtectionKey;
+
+ return MapAttribute.Uint64;
+}
+
+/**
+ Return the attribute of a 4K page table entry.
+
+ @param[in] Pte4K Pointer to a 4K page table entry.
+ @param[in] ParentMapAttribute Pointer to the parent attribute.
+
+ @return Attribute of the 4K page table entry.
+**/
+UINT64
+PageTableLibGetPte4KMapAttribute (
+ IN IA32_PTE_4K *Pte4K,
+ IN IA32_MAP_ATTRIBUTE *ParentMapAttribute
+ )
+{
+ IA32_MAP_ATTRIBUTE MapAttribute;
+
+ MapAttribute.Uint64 = IA32_PTE4K_PAGE_TABLE_BASE_ADDRESS (Pte4K);
+
+ MapAttribute.Bits.Present = ParentMapAttribute->Bits.Present & Pte4K->Bits.Present;
+ MapAttribute.Bits.ReadWrite = ParentMapAttribute->Bits.ReadWrite & Pte4K->Bits.ReadWrite;
+ MapAttribute.Bits.UserSupervisor = ParentMapAttribute->Bits.UserSupervisor & Pte4K->Bits.UserSupervisor;
+ MapAttribute.Bits.Nx = ParentMapAttribute->Bits.Nx | Pte4K->Bits.Nx;
+ MapAttribute.Bits.WriteThrough = Pte4K->Bits.WriteThrough;
+ MapAttribute.Bits.CacheDisabled = Pte4K->Bits.CacheDisabled;
+ MapAttribute.Bits.Accessed = Pte4K->Bits.Accessed;
+
+ MapAttribute.Bits.Pat = Pte4K->Bits.Pat;
+ MapAttribute.Bits.Dirty = Pte4K->Bits.Dirty;
+ MapAttribute.Bits.Global = Pte4K->Bits.Global;
+ MapAttribute.Bits.ProtectionKey = Pte4K->Bits.ProtectionKey;
+
+ return MapAttribute.Uint64;
+}
+
+/**
+ Return the attribute of a non-leaf page table entry.
+
+ @param[in] Pnle Pointer to a non-leaf page table entry.
+ @param[in] ParentMapAttribute Pointer to the parent attribute.
+
+ @return Attribute of the non-leaf page table entry.
+**/
+UINT64
+PageTableLibGetPnleMapAttribute (
+ IN IA32_PAGE_NON_LEAF_ENTRY *Pnle,
+ IN IA32_MAP_ATTRIBUTE *ParentMapAttribute
+ )
+{
+ IA32_MAP_ATTRIBUTE MapAttribute;
+
+ MapAttribute.Uint64 = Pnle->Uint64;
+
+ MapAttribute.Bits.Present = ParentMapAttribute->Bits.Present & Pnle->Bits.Present;
+ MapAttribute.Bits.ReadWrite = ParentMapAttribute->Bits.ReadWrite & Pnle->Bits.ReadWrite;
+ MapAttribute.Bits.UserSupervisor = ParentMapAttribute->Bits.UserSupervisor & Pnle->Bits.UserSupervisor;
+ MapAttribute.Bits.Nx = ParentMapAttribute->Bits.Nx | Pnle->Bits.Nx;
+ MapAttribute.Bits.WriteThrough = Pnle->Bits.WriteThrough;
+ MapAttribute.Bits.CacheDisabled = Pnle->Bits.CacheDisabled;
+ MapAttribute.Bits.Accessed = Pnle->Bits.Accessed;
+ return MapAttribute.Uint64;
+}
+
+/**
+ Return TRUE when the page table entry is a leaf entry that maps physical memory.
+ Return FALSE when the page table entry is a non-leaf entry that references the next-level page table.
+
+ @param[in] PagingEntry Pointer to the page table entry.
+ @param[in] Level Page level where the page table entry resides in.
+
+ @retval TRUE It's a leaf entry.
+ @retval FALSE It's a non-leaf entry.
+**/
+BOOLEAN
+IsPle (
+ IN IA32_PAGING_ENTRY *PagingEntry,
+ IN UINTN Level
+ )
+{
+ //
+ // PML5E and PML4E are always non-leaf entries; a PTE (Level 1) is always a leaf entry.
+ //
+ if (Level == 1) {
+ return TRUE;
+ }
+
+ if (((Level == 3) || (Level == 2))) {
+ if (PagingEntry->PleB.Bits.MustBeOne == 1) {
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+/**
+ Recursively parse the non-leaf page table entries.
+
+ @param[in] PageTableBaseAddress The base address of the 512 page table entries in the specified level.
+ @param[in] Level Page level. Could be 5, 4, 3, 2, 1.
+ @param[in] RegionStart The base linear address of the region covered by the non-leaf page table entries.
+ @param[in] ParentMapAttribute The mapping attribute of the parent entries.
+ @param[in, out] Map Pointer to an array that describes multiple linear address ranges.
+ @param[in, out] MapCount Pointer to a UINTN that holds the actual number of entries in the Map.
+ @param[in] MapCapacity The maximum number of entries the Map can hold.
+ @param[in] LastEntry Pointer to last map entry.
+ @param[in] OneEntry Pointer to a library internal storage that holds one map entry.
+ It's used when Map array is used up.
+**/
+VOID
+PageTableLibParsePnle (
+ IN UINT64 PageTableBaseAddress,
+ IN UINTN Level,
+ IN UINT64 RegionStart,
+ IN IA32_MAP_ATTRIBUTE *ParentMapAttribute,
+ IN OUT IA32_MAP_ENTRY *Map,
+ IN OUT UINTN *MapCount,
+ IN UINTN MapCapacity,
+ IN IA32_MAP_ENTRY **LastEntry,
+ IN IA32_MAP_ENTRY *OneEntry
+ )
+{
+ IA32_PAGING_ENTRY *PagingEntry;
+ UINTN Index;
+ IA32_MAP_ATTRIBUTE MapAttribute;
+ UINT64 RegionLength;
+
+ ASSERT (OneEntry != NULL);
+
+ PagingEntry = (IA32_PAGING_ENTRY *)(UINTN)PageTableBaseAddress;
+ RegionLength = REGION_LENGTH (Level);
+
+ for (Index = 0; Index < 512; Index++, RegionStart += RegionLength) {
+ if (PagingEntry[Index].Pce.Present == 0) {
+ continue;
+ }
+
+ if (IsPle (&PagingEntry[Index], Level)) {
+ ASSERT (Level == 1 || Level == 2 || Level == 3);
+
+ if (Level == 1) {
+ MapAttribute.Uint64 = PageTableLibGetPte4KMapAttribute (&PagingEntry[Index].Pte4K, ParentMapAttribute);
+ } else {
+ MapAttribute.Uint64 = PageTableLibGetPleBMapAttribute (&PagingEntry[Index].PleB, ParentMapAttribute);
+ }
+
+ if ((*LastEntry != NULL) &&
+ ((*LastEntry)->LinearAddress + (*LastEntry)->Length == RegionStart) &&
+ (IA32_MAP_ATTRIBUTE_PAGE_TABLE_BASE_ADDRESS (&(*LastEntry)->Attribute) + (*LastEntry)->Length
+ == IA32_MAP_ATTRIBUTE_PAGE_TABLE_BASE_ADDRESS (&MapAttribute)) &&
+ (IA32_MAP_ATTRIBUTE_ATTRIBUTES (&(*LastEntry)->Attribute) == IA32_MAP_ATTRIBUTE_ATTRIBUTES (&MapAttribute))
+ )
+ {
+ //
+ // Extend LastEntry.
+ //
+ (*LastEntry)->Length += RegionLength;
+ } else {
+ if (*MapCount < MapCapacity) {
+ //
+ // LastEntry points to next map entry in the array.
+ //
+ *LastEntry = &Map[*MapCount];
+ } else {
+ //
+ // LastEntry points to library internal map entry.
+ //
+ *LastEntry = OneEntry;
+ }
+
+ //
+ // Set LastEntry.
+ //
+ (*LastEntry)->LinearAddress = RegionStart;
+ (*LastEntry)->Length = RegionLength;
+ (*LastEntry)->Attribute.Uint64 = MapAttribute.Uint64;
+ (*MapCount)++;
+ }
+ } else {
+ MapAttribute.Uint64 = PageTableLibGetPnleMapAttribute (&PagingEntry[Index].Pnle, ParentMapAttribute);
+ PageTableLibParsePnle (
+ IA32_PNLE_PAGE_TABLE_BASE_ADDRESS (&PagingEntry[Index].Pnle),
+ Level - 1,
+ RegionStart,
+ &MapAttribute,
+ Map,
+ MapCount,
+ MapCapacity,
+ LastEntry,
+ OneEntry
+ );
+ }
+ }
+}
+
+/**
+ Parse page table.
+
+ @param[in] PageTable Pointer to the page table.
+ @param[in] PagingMode The paging mode.
+ @param[out] Map Return an array that describes multiple linear address ranges.
+ @param[in, out] MapCount On input, the maximum number of entries that Map can hold.
+ On output, the number of entries in Map.
+
+ @retval RETURN_UNSUPPORTED PagingMode is not 4-level or 5-level paging.
+ @retval RETURN_INVALID_PARAMETER MapCount is NULL.
+ @retval RETURN_INVALID_PARAMETER *MapCount is not 0 but Map is NULL.
+ @retval RETURN_BUFFER_TOO_SMALL *MapCount is too small.
+ @retval RETURN_SUCCESS Page table is parsed successfully.
+**/
+RETURN_STATUS
+EFIAPI
+PageTableParse (
+ IN UINTN PageTable,
+ IN PAGING_MODE PagingMode,
+ OUT IA32_MAP_ENTRY *Map,
+ IN OUT UINTN *MapCount
+ )
+{
+ UINTN MapCapacity;
+ IA32_MAP_ATTRIBUTE NopAttribute;
+ IA32_MAP_ENTRY *LastEntry;
+ IA32_MAP_ENTRY OneEntry;
+ UINTN MaxLevel;
+
+ if ((PagingMode == Paging32bit) || (PagingMode == PagingPae) || (PagingMode >= PagingModeMax)) {
+ //
+ // 32bit paging is never supported.
+ // PAE paging will be supported later.
+ //
+ return RETURN_UNSUPPORTED;
+ }
+
+ if (MapCount == NULL) {
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ if ((*MapCount != 0) && (Map == NULL)) {
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ if (PageTable == 0) {
+ *MapCount = 0;
+ return RETURN_SUCCESS;
+ }
+
+ //
+ // Page table layout is as below:
+ //
+ // [IA32_CR3]
+ // |
+ // |
+ // V
+ // [IA32_PML5E]
+ // ...
+ // [IA32_PML5E] --> [IA32_PML4E]
+ // ...
+ // [IA32_PML4E] --> [IA32_PDPTE_1G] --> 1G aligned physical address
+ // ...
+ // [IA32_PDPTE] --> [IA32_PDE_2M] --> 2M aligned physical address
+ // ...
+ // [IA32_PDE] --> [IA32_PTE_4K] --> 4K aligned physical address
+ // ...
+ // [IA32_PTE_4K] --> 4K aligned physical address
+ //
+
+ NopAttribute.Uint64 = 0;
+ NopAttribute.Bits.Present = 1;
+ NopAttribute.Bits.ReadWrite = 1;
+ NopAttribute.Bits.UserSupervisor = 1;
+
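+ //
+ // The high byte of PagingMode is the number of page table levels (see PAGING_MODE).
+ //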
+ MaxLevel = (UINT8)(PagingMode >> 8);
+ MapCapacity = *MapCount;
+ *MapCount = 0;
+ LastEntry = NULL;
+ PageTableLibParsePnle ((UINT64)PageTable, MaxLevel, 0, &NopAttribute, Map, MapCount, MapCapacity, &LastEntry, &OneEntry);
+
+ if (*MapCount > MapCapacity) {
+ return RETURN_BUFFER_TOO_SMALL;
+ }
+
+ return RETURN_SUCCESS;
+}
diff --git a/UefiCpuPkg/UefiCpuPkg.dec b/UefiCpuPkg/UefiCpuPkg.dec
index 1951eb294c..4fe79cecbf 100644
--- a/UefiCpuPkg/UefiCpuPkg.dec
+++ b/UefiCpuPkg/UefiCpuPkg.dec
@@ -62,6 +62,9 @@
## @libraryclass Provides function for loading microcode.
MicrocodeLib|Include/Library/MicrocodeLib.h
+ ## @libraryclass Provides function for manipulating x86 paging structures.
+ CpuPageTableLib|Include/Library/CpuPageTableLib.h
+
[Guids]
gUefiCpuPkgTokenSpaceGuid = { 0xac05bf33, 0x995a, 0x4ed4, { 0xaa, 0xb8, 0xef, 0x7a, 0xe8, 0xf, 0x5c, 0xb0 }}
gMsegSmramGuid = { 0x5802bce4, 0xeeee, 0x4e33, { 0xa1, 0x30, 0xeb, 0xad, 0x27, 0xf0, 0xe4, 0x39 }}
diff --git a/UefiCpuPkg/UefiCpuPkg.dsc b/UefiCpuPkg/UefiCpuPkg.dsc
index a0bbde9985..f694b3a77c 100644
--- a/UefiCpuPkg/UefiCpuPkg.dsc
+++ b/UefiCpuPkg/UefiCpuPkg.dsc
@@ -1,7 +1,7 @@
## @file
# UefiCpuPkg Package
#
-# Copyright (c) 2007 - 2021, Intel Corporation. All rights reserved.<BR>
+# Copyright (c) 2007 - 2022, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
@@ -62,6 +62,7 @@
VmgExitLib|UefiCpuPkg/Library/VmgExitLibNull/VmgExitLibNull.inf
MicrocodeLib|UefiCpuPkg/Library/MicrocodeLib/MicrocodeLib.inf
SmmCpuRendezvousLib|UefiCpuPkg/Library/SmmCpuRendezvousLib/SmmCpuRendezvousLib.inf
+ CpuPageTableLib|UefiCpuPkg/Library/CpuPageTableLib/CpuPageTableLib.inf
[LibraryClasses.common.SEC]
PlatformSecLib|UefiCpuPkg/Library/PlatformSecLibNull/PlatformSecLibNull.inf
@@ -175,6 +176,7 @@
UefiCpuPkg/Universal/Acpi/S3Resume2Pei/S3Resume2Pei.inf
UefiCpuPkg/ResetVector/Vtf0/Bin/ResetVector.inf
UefiCpuPkg/Library/SmmCpuRendezvousLib/SmmCpuRendezvousLib.inf
+ UefiCpuPkg/Library/CpuPageTableLib/CpuPageTableLib.inf
[BuildOptions]
*_*_*_CC_FLAGS = -D DISABLE_NEW_DEPRECATED_INTERFACES
--
2.35.1.windows.2