* [PATCH] IntelSiliconPkg/Vtd: Add Vtd core drivers
@ 2023-03-02 6:10 Sheng Wei
0 siblings, 0 replies; 2+ messages in thread
From: Sheng Wei @ 2023-03-02 6:10 UTC (permalink / raw)
To: devel; +Cc: Ray Ni, Rangasai V Chaganty, Jenny Huang, Robert Kowalewski
Add 2 drivers (IntelVTdCorePei, IntelVTdCoreDxe)
for pre-boot DMA protection feature.
Change-Id: I306a10b33a1fc4f80fa63c1fc1fb559aa054aa8c
Signed-off-by: Sheng Wei <w.sheng@intel.com>
Cc: Ray Ni <ray.ni@intel.com>
Cc: Rangasai V Chaganty <rangasai.v.chaganty@intel.com>
Cc: Jenny Huang <jenny.huang@intel.com>
Cc: Robert Kowalewski <robert.kowalewski@intel.com>
---
.../Feature/VTd/IntelVTdCoreDxe/BmDma.c | 547 +++++
.../VTd/IntelVTdCoreDxe/DmaProtection.c | 703 +++++++
.../VTd/IntelVTdCoreDxe/DmaProtection.h | 658 ++++++
.../VTd/IntelVTdCoreDxe/DmarAcpiTable.c | 398 ++++
.../VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.c | 782 +++++++
.../VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.inf | 92 +
.../VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.uni | 14 +
.../VTd/IntelVTdCoreDxe/IntelVTdCoreDxeExtra.uni   |   14 +
.../Feature/VTd/IntelVTdCoreDxe/PciInfo.c | 419 ++++
.../VTd/IntelVTdCoreDxe/TranslationTable.c | 1112 ++++++++++
.../VTd/IntelVTdCoreDxe/TranslationTableEx.c | 108 +
.../Feature/VTd/IntelVTdCoreDxe/VtdReg.c | 759 +++++++
.../Feature/VTd/IntelVTdCorePei/DmarTable.c | 301 +++
.../VTd/IntelVTdCorePei/IntelVTdCorePei.c | 1099 ++++++++++
.../VTd/IntelVTdCorePei/IntelVTdCorePei.h | 272 +++
.../VTd/IntelVTdCorePei/IntelVTdCorePei.inf | 70 +
.../VTd/IntelVTdCorePei/IntelVTdCorePei.uni | 14 +
.../VTd/IntelVTdCorePei/IntelVTdCorePeiExtra.uni   |   14 +
.../VTd/IntelVTdCorePei/IntelVTdDmar.c | 731 +++++++
.../VTd/IntelVTdCorePei/TranslationTable.c | 926 +++++++++
.../Include/Guid/VtdLogDataHob.h | 151 ++
.../Include/Library/IntelVTdPeiDxeLib.h | 329 +++
.../IntelSiliconPkg/Include/Protocol/VtdLog.h | 59 +
.../Intel/IntelSiliconPkg/IntelSiliconPkg.dec | 21 +
.../Intel/IntelSiliconPkg/IntelSiliconPkg.dsc | 1 +
.../IntelVTdPeiDxeLib/IntelVTdPeiDxeLib.c | 1810 +++++++++++++++++
.../IntelVTdPeiDxeLib/IntelVTdPeiDxeLib.inf | 30 +
.../IntelVTdPeiDxeLibExt.inf | 34 +
28 files changed, 11468 insertions(+)
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/BmDma.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmaProtection.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmaProtection.h
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmarAcpiTable.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.inf
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.uni
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxeExtra.uni
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/PciInfo.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/TranslationTable.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/TranslationTableEx.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/VtdReg.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/DmarTable.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.h
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.inf
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.uni
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePeiExtra.uni
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdDmar.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/TranslationTable.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Include/Guid/VtdLogDataHob.h
create mode 100644 Silicon/Intel/IntelSiliconPkg/Include/Library/IntelVTdPeiDxeLib.h
create mode 100644 Silicon/Intel/IntelSiliconPkg/Include/Protocol/VtdLog.h
create mode 100644 Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLib.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLib.inf
create mode 100644 Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLibExt.inf
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/BmDma.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/BmDma.c
new file mode 100644
index 000000000..41917a004
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/BmDma.c
@@ -0,0 +1,547 @@
+/** @file
+ BmDma related function
+
+ Copyright (c) 2017 - 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "DmaProtection.h"
+
+// TBD: May make it a policy
+#define DMA_MEMORY_TOP MAX_UINTN
+//#define DMA_MEMORY_TOP 0x0000000001FFFFFFULL
+
+#define MAP_HANDLE_INFO_SIGNATURE SIGNATURE_32 ('H', 'M', 'A', 'P')
+typedef struct {
+ UINT32 Signature;
+ LIST_ENTRY Link;
+ EFI_HANDLE DeviceHandle;
+ UINT64 IoMmuAccess;
+} MAP_HANDLE_INFO;
+#define MAP_HANDLE_INFO_FROM_LINK(a) CR (a, MAP_HANDLE_INFO, Link, MAP_HANDLE_INFO_SIGNATURE)
+
+#define MAP_INFO_SIGNATURE SIGNATURE_32 ('D', 'M', 'A', 'P')
+typedef struct {
+ UINT32 Signature;
+ LIST_ENTRY Link;
+ EDKII_IOMMU_OPERATION Operation;
+ UINTN NumberOfBytes;
+ UINTN NumberOfPages;
+ EFI_PHYSICAL_ADDRESS HostAddress;
+ EFI_PHYSICAL_ADDRESS DeviceAddress;
+ LIST_ENTRY HandleList;
+} MAP_INFO;
+#define MAP_INFO_FROM_LINK(a) CR (a, MAP_INFO, Link, MAP_INFO_SIGNATURE)
+
+LIST_ENTRY gMaps = INITIALIZE_LIST_HEAD_VARIABLE(gMaps);
+
+/**
+  This function fills DeviceHandle/IoMmuAccess to the MAP_HANDLE_INFO,
+  based upon the DeviceAddress.
+
+  @param[in]  DeviceHandle   The device who initiates the DMA access request.
+  @param[in]  DeviceAddress  The base of device memory address to be used as the DMA memory.
+  @param[in]  Length         The length of device memory address to be used as the DMA memory.
+  @param[in]  IoMmuAccess    The IOMMU access.
+
+**/
+VOID
+SyncDeviceHandleToMapInfo (
+  IN EFI_HANDLE            DeviceHandle,
+  IN EFI_PHYSICAL_ADDRESS  DeviceAddress,
+  IN UINT64                Length,
+  IN UINT64                IoMmuAccess
+  )
+{
+  MAP_INFO         *MapInfo;
+  MAP_HANDLE_INFO  *MapHandleInfo;
+  LIST_ENTRY       *Link;
+  EFI_TPL          OriginalTpl;
+
+  //
+  // Find MapInfo according to DeviceAddress.
+  // Raise TPL so the gMaps walk cannot race with Map()/Unmap() list updates.
+  //
+  OriginalTpl = gBS->RaiseTPL (VTD_TPL_LEVEL);
+  MapInfo = NULL;
+  for (Link = GetFirstNode (&gMaps)
+       ; !IsNull (&gMaps, Link)
+       ; Link = GetNextNode (&gMaps, Link)
+       ) {
+    MapInfo = MAP_INFO_FROM_LINK (Link);
+    if (MapInfo->DeviceAddress == DeviceAddress) {
+      break;
+    }
+  }
+  //
+  // If the loop ran off the end, MapInfo still points at the last entry,
+  // so the address must be re-checked (NULL covers the empty-list case).
+  //
+  if ((MapInfo == NULL) || (MapInfo->DeviceAddress != DeviceAddress)) {
+    DEBUG ((DEBUG_ERROR, "SyncDeviceHandleToMapInfo: DeviceAddress(0x%lx) - not found\n", DeviceAddress));
+    gBS->RestoreTPL (OriginalTpl);
+    return ;
+  }
+
+  //
+  // Find MapHandleInfo according to DeviceHandle
+  //
+  MapHandleInfo = NULL;
+  for (Link = GetFirstNode (&MapInfo->HandleList)
+       ; !IsNull (&MapInfo->HandleList, Link)
+       ; Link = GetNextNode (&MapInfo->HandleList, Link)
+       ) {
+    MapHandleInfo = MAP_HANDLE_INFO_FROM_LINK (Link);
+    if (MapHandleInfo->DeviceHandle == DeviceHandle) {
+      break;
+    }
+  }
+  //
+  // The handle already has a record: just refresh its access attribute.
+  //
+  if ((MapHandleInfo != NULL) && (MapHandleInfo->DeviceHandle == DeviceHandle)) {
+    MapHandleInfo->IoMmuAccess = IoMmuAccess;
+    gBS->RestoreTPL (OriginalTpl);
+    return ;
+  }
+
+  //
+  // No DeviceHandle
+  // Initialize and insert the MAP_HANDLE_INFO structure
+  //
+  MapHandleInfo = AllocatePool (sizeof (MAP_HANDLE_INFO));
+  if (MapHandleInfo == NULL) {
+    DEBUG ((DEBUG_ERROR, "SyncDeviceHandleToMapInfo: %r\n", EFI_OUT_OF_RESOURCES));
+    gBS->RestoreTPL (OriginalTpl);
+    return ;
+  }
+
+  MapHandleInfo->Signature    = MAP_HANDLE_INFO_SIGNATURE;
+  MapHandleInfo->DeviceHandle = DeviceHandle;
+  MapHandleInfo->IoMmuAccess  = IoMmuAccess;
+
+  InsertTailList (&MapInfo->HandleList, &MapHandleInfo->Link);
+  gBS->RestoreTPL (OriginalTpl);
+
+  return ;
+}
+
+/**
+  Provides the controller-specific addresses required to access system memory from a
+  DMA bus master.
+
+  @param  This                  The protocol instance pointer.
+  @param  Operation             Indicates if the bus master is going to read or write to system memory.
+  @param  HostAddress           The system memory address to map to the PCI controller.
+  @param  NumberOfBytes         On input the number of bytes to map. On output the number of bytes
+                                that were mapped.
+  @param  DeviceAddress         The resulting map address for the bus master PCI controller to use to
+                                access the hosts HostAddress.
+  @param  Mapping               A resulting value to pass to Unmap().
+
+  @retval EFI_SUCCESS           The range was mapped for the returned NumberOfBytes.
+  @retval EFI_UNSUPPORTED       The HostAddress cannot be mapped as a common buffer.
+  @retval EFI_INVALID_PARAMETER One or more parameters are invalid.
+  @retval EFI_OUT_OF_RESOURCES  The request could not be completed due to a lack of resources.
+  @retval EFI_DEVICE_ERROR      The system hardware could not map the requested address.
+
+**/
+EFI_STATUS
+EFIAPI
+IoMmuMap (
+  IN     EDKII_IOMMU_PROTOCOL  *This,
+  IN     EDKII_IOMMU_OPERATION Operation,
+  IN     VOID                  *HostAddress,
+  IN OUT UINTN                 *NumberOfBytes,
+  OUT    EFI_PHYSICAL_ADDRESS  *DeviceAddress,
+  OUT    VOID                  **Mapping
+  )
+{
+  EFI_STATUS            Status;
+  EFI_PHYSICAL_ADDRESS  PhysicalAddress;
+  MAP_INFO              *MapInfo;
+  EFI_PHYSICAL_ADDRESS  DmaMemoryTop;
+  BOOLEAN               NeedRemap;
+  EFI_TPL               OriginalTpl;
+
+  if (NumberOfBytes == NULL || DeviceAddress == NULL ||
+      Mapping == NULL) {
+    DEBUG ((DEBUG_ERROR, "IoMmuMap: %r\n", EFI_INVALID_PARAMETER));
+    return EFI_INVALID_PARAMETER;
+  }
+
+  DEBUG ((DEBUG_VERBOSE, "IoMmuMap: ==> 0x%08x - 0x%08x (%x)\n", HostAddress, *NumberOfBytes, Operation));
+
+  //
+  // Make sure that Operation is valid
+  //
+  if ((UINT32) Operation >= EdkiiIoMmuOperationMaximum) {
+    DEBUG ((DEBUG_ERROR, "IoMmuMap: %r\n", EFI_INVALID_PARAMETER));
+    return EFI_INVALID_PARAMETER;
+  }
+  NeedRemap       = FALSE;
+  PhysicalAddress = (EFI_PHYSICAL_ADDRESS) (UINTN) HostAddress;
+
+  //
+  // DmaMemoryTop is the highest address a bounce buffer may occupy;
+  // it is tightened below if the operation is 32-bit only.
+  //
+  DmaMemoryTop = DMA_MEMORY_TOP;
+
+  //
+  // Alignment check: an unaligned buffer must be bounced so the IOMMU
+  // page-granular permissions do not expose neighboring data.
+  //
+  if ((*NumberOfBytes != ALIGN_VALUE(*NumberOfBytes, SIZE_4KB)) ||
+      (PhysicalAddress != ALIGN_VALUE(PhysicalAddress, SIZE_4KB))) {
+    if ((Operation == EdkiiIoMmuOperationBusMasterCommonBuffer) ||
+        (Operation == EdkiiIoMmuOperationBusMasterCommonBuffer64)) {
+      //
+      // The input buffer might be a subset from IoMmuAllocateBuffer.
+      // Skip the check.
+      //
+    } else {
+      NeedRemap = TRUE;
+    }
+  }
+
+  //
+  // Buffer ends at or above the DMA memory limit: must bounce.
+  //
+  if ((PhysicalAddress + *NumberOfBytes) >= DMA_MEMORY_TOP) {
+    NeedRemap = TRUE;
+  }
+
+  if (((Operation != EdkiiIoMmuOperationBusMasterRead64 &&
+        Operation != EdkiiIoMmuOperationBusMasterWrite64 &&
+        Operation != EdkiiIoMmuOperationBusMasterCommonBuffer64)) &&
+      ((PhysicalAddress + *NumberOfBytes) > SIZE_4GB)) {
+    //
+    // If the root bridge or the device cannot handle performing DMA above
+    // 4GB but any part of the DMA transfer being mapped is above 4GB, then
+    // map the DMA transfer to a buffer below 4GB.
+    //
+    NeedRemap    = TRUE;
+    DmaMemoryTop = MIN (DmaMemoryTop, SIZE_4GB - 1);
+  }
+
+  if (Operation == EdkiiIoMmuOperationBusMasterCommonBuffer ||
+      Operation == EdkiiIoMmuOperationBusMasterCommonBuffer64) {
+    if (NeedRemap) {
+      //
+      // Common Buffer operations can not be remapped. If the common buffer
+      // is above 4GB, then it is not possible to generate a mapping, so return
+      // an error.
+      //
+      DEBUG ((DEBUG_ERROR, "IoMmuMap: %r\n", EFI_UNSUPPORTED));
+      return EFI_UNSUPPORTED;
+    }
+  }
+
+  //
+  // Allocate a MAP_INFO structure to remember the mapping when Unmap() is
+  // called later.
+  //
+  MapInfo = AllocatePool (sizeof (MAP_INFO));
+  if (MapInfo == NULL) {
+    *NumberOfBytes = 0;
+    DEBUG ((DEBUG_ERROR, "IoMmuMap: %r\n", EFI_OUT_OF_RESOURCES));
+    return EFI_OUT_OF_RESOURCES;
+  }
+
+  //
+  // Initialize the MAP_INFO structure.
+  // DeviceAddress is pre-loaded with DmaMemoryTop: it doubles as the
+  // AllocateMaxAddress ceiling for the bounce-buffer allocation below.
+  //
+  MapInfo->Signature     = MAP_INFO_SIGNATURE;
+  MapInfo->Operation     = Operation;
+  MapInfo->NumberOfBytes = *NumberOfBytes;
+  MapInfo->NumberOfPages = EFI_SIZE_TO_PAGES (MapInfo->NumberOfBytes);
+  MapInfo->HostAddress   = PhysicalAddress;
+  MapInfo->DeviceAddress = DmaMemoryTop;
+  InitializeListHead(&MapInfo->HandleList);
+
+  //
+  // Allocate a bounce buffer (at or below DmaMemoryTop) to map the transfer to.
+  //
+  if (NeedRemap) {
+    Status = gBS->AllocatePages (
+                    AllocateMaxAddress,
+                    EfiBootServicesData,
+                    MapInfo->NumberOfPages,
+                    &MapInfo->DeviceAddress
+                    );
+    if (EFI_ERROR (Status)) {
+      FreePool (MapInfo);
+      *NumberOfBytes = 0;
+      DEBUG ((DEBUG_ERROR, "IoMmuMap: %r\n", Status));
+      return Status;
+    }
+
+    //
+    // If this is a read operation from the Bus Master's point of view,
+    // then copy the contents of the real buffer into the mapped buffer
+    // so the Bus Master can read the contents of the real buffer.
+    //
+    if (Operation == EdkiiIoMmuOperationBusMasterRead ||
+        Operation == EdkiiIoMmuOperationBusMasterRead64) {
+      CopyMem (
+        (VOID *) (UINTN) MapInfo->DeviceAddress,
+        (VOID *) (UINTN) MapInfo->HostAddress,
+        MapInfo->NumberOfBytes
+        );
+    }
+  } else {
+    //
+    // No bounce needed: the device uses the host buffer directly.
+    //
+    MapInfo->DeviceAddress = MapInfo->HostAddress;
+  }
+
+  OriginalTpl = gBS->RaiseTPL (VTD_TPL_LEVEL);
+  InsertTailList (&gMaps, &MapInfo->Link);
+  gBS->RestoreTPL (OriginalTpl);
+
+  //
+  // The DeviceAddress is the bounce buffer when remapped,
+  // otherwise the original host address.
+  //
+  *DeviceAddress = MapInfo->DeviceAddress;
+  //
+  // Return a pointer to the MAP_INFO structure in Mapping
+  //
+  *Mapping = MapInfo;
+
+  DEBUG ((DEBUG_VERBOSE, "IoMmuMap: 0x%08x - 0x%08x <==\n", *DeviceAddress, *Mapping));
+
+  VTdLogAddEvent (VTDLOG_DXE_IOMMU_MAP, (UINT64) (*DeviceAddress), (UINT64) Operation);
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Completes the Map() operation and releases any corresponding resources.
+
+  @param  This                  The protocol instance pointer.
+  @param  Mapping               The mapping value returned from Map().
+
+  @retval EFI_SUCCESS           The range was unmapped.
+  @retval EFI_INVALID_PARAMETER Mapping is not a value that was returned by Map().
+  @retval EFI_DEVICE_ERROR      The data was not committed to the target system memory.
+**/
+EFI_STATUS
+EFIAPI
+IoMmuUnmap (
+  IN EDKII_IOMMU_PROTOCOL  *This,
+  IN VOID                  *Mapping
+  )
+{
+  MAP_INFO         *MapInfo;
+  MAP_HANDLE_INFO  *MapHandleInfo;
+  LIST_ENTRY       *Link;
+  EFI_TPL          OriginalTpl;
+
+  DEBUG ((DEBUG_VERBOSE, "IoMmuUnmap: 0x%08x\n", Mapping));
+
+  if (Mapping == NULL) {
+    DEBUG ((DEBUG_ERROR, "IoMmuUnmap: %r\n", EFI_INVALID_PARAMETER));
+    return EFI_INVALID_PARAMETER;
+  }
+
+  //
+  // Validate Mapping by searching gMaps under TPL protection; only a pointer
+  // that Map() actually inserted is accepted.
+  //
+  OriginalTpl = gBS->RaiseTPL (VTD_TPL_LEVEL);
+  MapInfo = NULL;
+  for (Link = GetFirstNode (&gMaps)
+       ; !IsNull (&gMaps, Link)
+       ; Link = GetNextNode (&gMaps, Link)
+       ) {
+    MapInfo = MAP_INFO_FROM_LINK (Link);
+    if (MapInfo == Mapping) {
+      break;
+    }
+  }
+  //
+  // Mapping is not a valid value returned by Map()
+  //
+  if (MapInfo != Mapping) {
+    gBS->RestoreTPL (OriginalTpl);
+    DEBUG ((DEBUG_ERROR, "IoMmuUnmap: %r\n", EFI_INVALID_PARAMETER));
+    return EFI_INVALID_PARAMETER;
+  }
+  RemoveEntryList (&MapInfo->Link);
+  gBS->RestoreTPL (OriginalTpl);
+
+  //
+  // remove all nodes in MapInfo->HandleList
+  //
+  while (!IsListEmpty (&MapInfo->HandleList)) {
+    MapHandleInfo = MAP_HANDLE_INFO_FROM_LINK (MapInfo->HandleList.ForwardLink);
+    RemoveEntryList (&MapHandleInfo->Link);
+    FreePool (MapHandleInfo);
+  }
+
+  //
+  // DeviceAddress != HostAddress means Map() allocated a bounce buffer.
+  //
+  if (MapInfo->DeviceAddress != MapInfo->HostAddress) {
+    //
+    // If this is a write operation from the Bus Master's point of view,
+    // then copy the contents of the mapped buffer into the real buffer
+    // so the processor can read the contents of the real buffer.
+    //
+    if (MapInfo->Operation == EdkiiIoMmuOperationBusMasterWrite ||
+        MapInfo->Operation == EdkiiIoMmuOperationBusMasterWrite64) {
+      CopyMem (
+        (VOID *) (UINTN) MapInfo->HostAddress,
+        (VOID *) (UINTN) MapInfo->DeviceAddress,
+        MapInfo->NumberOfBytes
+        );
+    }
+
+    //
+    // Free the mapped buffer and the MAP_INFO structure.
+    //
+    gBS->FreePages (MapInfo->DeviceAddress, MapInfo->NumberOfPages);
+  }
+
+  VTdLogAddEvent (VTDLOG_DXE_IOMMU_UNMAP, MapInfo->NumberOfBytes, MapInfo->DeviceAddress);
+
+  FreePool (Mapping);
+  return EFI_SUCCESS;
+}
+
+/**
+  Allocates pages that are suitable for an OperationBusMasterCommonBuffer or
+  OperationBusMasterCommonBuffer64 mapping.
+
+  @param  This                  The protocol instance pointer.
+  @param  Type                  This parameter is not used and must be ignored.
+  @param  MemoryType            The type of memory to allocate, EfiBootServicesData or
+                                EfiRuntimeServicesData.
+  @param  Pages                 The number of pages to allocate.
+  @param  HostAddress           A pointer to store the base system memory address of the
+                                allocated range.
+  @param  Attributes            The requested bit mask of attributes for the allocated range.
+
+  @retval EFI_SUCCESS           The requested memory pages were allocated.
+  @retval EFI_UNSUPPORTED       Attributes is unsupported. The only legal attribute bits are
+                                MEMORY_WRITE_COMBINE, MEMORY_CACHED and DUAL_ADDRESS_CYCLE.
+  @retval EFI_INVALID_PARAMETER One or more parameters are invalid.
+  @retval EFI_OUT_OF_RESOURCES  The memory pages could not be allocated.
+
+**/
+EFI_STATUS
+EFIAPI
+IoMmuAllocateBuffer (
+  IN     EDKII_IOMMU_PROTOCOL  *This,
+  IN     EFI_ALLOCATE_TYPE     Type,
+  IN     EFI_MEMORY_TYPE       MemoryType,
+  IN     UINTN                 Pages,
+  IN OUT VOID                  **HostAddress,
+  IN     UINT64                Attributes
+  )
+{
+  EFI_STATUS            Status;
+  EFI_PHYSICAL_ADDRESS  PhysicalAddress;
+
+  DEBUG ((DEBUG_VERBOSE, "IoMmuAllocateBuffer: ==> 0x%08x\n", Pages));
+
+  //
+  // Validate Attributes
+  //
+  if ((Attributes & EDKII_IOMMU_ATTRIBUTE_INVALID_FOR_ALLOCATE_BUFFER) != 0) {
+    DEBUG ((DEBUG_ERROR, "IoMmuAllocateBuffer: %r\n", EFI_UNSUPPORTED));
+    return EFI_UNSUPPORTED;
+  }
+
+  //
+  // Check for invalid inputs
+  //
+  if (HostAddress == NULL) {
+    DEBUG ((DEBUG_ERROR, "IoMmuAllocateBuffer: %r\n", EFI_INVALID_PARAMETER));
+    return EFI_INVALID_PARAMETER;
+  }
+
+  //
+  // The only valid memory types are EfiBootServicesData and
+  // EfiRuntimeServicesData
+  //
+  if (MemoryType != EfiBootServicesData &&
+      MemoryType != EfiRuntimeServicesData) {
+    DEBUG ((DEBUG_ERROR, "IoMmuAllocateBuffer: %r\n", EFI_INVALID_PARAMETER));
+    return EFI_INVALID_PARAMETER;
+  }
+
+  //
+  // PhysicalAddress serves as the AllocateMaxAddress ceiling.
+  //
+  PhysicalAddress = DMA_MEMORY_TOP;
+  if ((Attributes & EDKII_IOMMU_ATTRIBUTE_DUAL_ADDRESS_CYCLE) == 0) {
+    //
+    // Limit allocations to memory below 4GB
+    //
+    PhysicalAddress = MIN (PhysicalAddress, SIZE_4GB - 1);
+  }
+  Status = gBS->AllocatePages (
+                  AllocateMaxAddress,
+                  MemoryType,
+                  Pages,
+                  &PhysicalAddress
+                  );
+  if (!EFI_ERROR (Status)) {
+    *HostAddress = (VOID *) (UINTN) PhysicalAddress;
+
+    VTdLogAddEvent (VTDLOG_DXE_IOMMU_ALLOC_BUFFER, (UINT64) Pages, (UINT64) (*HostAddress));
+
+    //
+    // Only dereference *HostAddress on success; on failure the caller's
+    // pointer may be uninitialized and must not be read.
+    //
+    DEBUG ((DEBUG_VERBOSE, "IoMmuAllocateBuffer: 0x%08x <==\n", *HostAddress));
+  }
+
+  return Status;
+}
+
+/**
+  Frees memory that was allocated with AllocateBuffer().
+
+  @param  This                  The protocol instance pointer.
+  @param  Pages                 The number of pages to free.
+  @param  HostAddress           The base system memory address of the allocated range.
+
+  @retval EFI_SUCCESS           The requested memory pages were freed.
+  @retval EFI_INVALID_PARAMETER The memory range specified by HostAddress and Pages
+                                was not allocated with AllocateBuffer().
+
+**/
+EFI_STATUS
+EFIAPI
+IoMmuFreeBuffer (
+  IN EDKII_IOMMU_PROTOCOL  *This,
+  IN UINTN                 Pages,
+  IN VOID                  *HostAddress
+  )
+{
+  //
+  // "0x%x" - the original format string was a bare "0x%", which is an
+  // incomplete conversion specifier and printed garbage.
+  //
+  DEBUG ((DEBUG_VERBOSE, "IoMmuFreeBuffer: 0x%x\n", Pages));
+
+  VTdLogAddEvent (VTDLOG_DXE_IOMMU_FREE_BUFFER, Pages, (UINT64) HostAddress);
+
+  return gBS->FreePages ((EFI_PHYSICAL_ADDRESS) (UINTN) HostAddress, Pages);
+}
+
+/**
+  Get device information from mapping.
+
+  @param[in]  Mapping        The mapping.
+  @param[out] DeviceAddress  The device address of the mapping.
+  @param[out] NumberOfPages  The number of pages of the mapping.
+
+  @retval EFI_SUCCESS           The device information is returned.
+  @retval EFI_INVALID_PARAMETER The mapping is invalid.
+**/
+EFI_STATUS
+GetDeviceInfoFromMapping (
+  IN  VOID                  *Mapping,
+  OUT EFI_PHYSICAL_ADDRESS  *DeviceAddress,
+  OUT UINTN                 *NumberOfPages
+  )
+{
+  MAP_INFO    *MapInfo;
+  LIST_ENTRY  *Link;
+
+  if (Mapping == NULL) {
+    return EFI_INVALID_PARAMETER;
+  }
+
+  //
+  // Search gMaps for the MAP_INFO that Map() returned as Mapping.
+  // NOTE(review): unlike the other gMaps walkers in this file, this one does
+  // not raise TPL around the traversal - confirm callers run at a safe TPL.
+  //
+  MapInfo = NULL;
+  for (Link = GetFirstNode (&gMaps)
+       ; !IsNull (&gMaps, Link)
+       ; Link = GetNextNode (&gMaps, Link)
+       ) {
+    MapInfo = MAP_INFO_FROM_LINK (Link);
+    if (MapInfo == Mapping) {
+      break;
+    }
+  }
+  //
+  // Mapping is not a valid value returned by Map()
+  //
+  if (MapInfo != Mapping) {
+    return EFI_INVALID_PARAMETER;
+  }
+
+  *DeviceAddress = MapInfo->DeviceAddress;
+  *NumberOfPages = MapInfo->NumberOfPages;
+  return EFI_SUCCESS;
+}
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmaProtection.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmaProtection.c
new file mode 100644
index 000000000..f2debe5a5
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmaProtection.c
@@ -0,0 +1,703 @@
+/** @file
+
+ Copyright (c) 2017 - 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "DmaProtection.h"
+
+UINT64 mBelow4GMemoryLimit;
+UINT64 mAbove4GMemoryLimit;
+
+EDKII_PLATFORM_VTD_POLICY_PROTOCOL *mPlatformVTdPolicy;
+
+VTD_ACCESS_REQUEST *mAccessRequest = NULL;
+UINTN mAccessRequestCount = 0;
+UINTN mAccessRequestMaxCount = 0;
+
+/**
+  Append VTd Access Request to global.
+
+  Requests recorded here are replayed by ProcessRequestedAccessAttribute()
+  once the DMAR table is installed.
+
+  @param[in]  Segment      The Segment used to identify a VTd engine.
+  @param[in]  SourceId     The SourceId used to identify a VTd engine and table entry.
+  @param[in]  BaseAddress  The base of device memory address to be used as the DMA memory.
+  @param[in]  Length       The length of device memory address to be used as the DMA memory.
+  @param[in]  IoMmuAccess  The IOMMU access.
+
+  @retval EFI_SUCCESS            The IoMmuAccess is set for the memory range specified by BaseAddress and Length.
+  @retval EFI_INVALID_PARAMETER  BaseAddress is not IoMmu Page size aligned.
+  @retval EFI_INVALID_PARAMETER  Length is not IoMmu Page size aligned.
+  @retval EFI_INVALID_PARAMETER  Length is 0.
+  @retval EFI_INVALID_PARAMETER  IoMmuAccess specified an illegal combination of access.
+  @retval EFI_UNSUPPORTED        The bit mask of IoMmuAccess is not supported by the IOMMU.
+  @retval EFI_UNSUPPORTED        The IOMMU does not support the memory range specified by BaseAddress and Length.
+  @retval EFI_OUT_OF_RESOURCES   There are not enough resources available to modify the IOMMU access.
+  @retval EFI_DEVICE_ERROR       The IOMMU device reported an error while attempting the operation.
+
+**/
+EFI_STATUS
+RequestAccessAttribute (
+  IN UINT16         Segment,
+  IN VTD_SOURCE_ID  SourceId,
+  IN UINT64         BaseAddress,
+  IN UINT64         Length,
+  IN UINT64         IoMmuAccess
+  )
+{
+  VTD_ACCESS_REQUEST  *NewAccessRequest;
+  UINTN               Index;
+
+  //
+  // Optimization for memory.
+  //
+  // If the last record is to IoMmuAccess=0,
+  // Check previous records and remove the matched entry.
+  //
+  if (IoMmuAccess == 0) {
+    for (Index = 0; Index < mAccessRequestCount; Index++) {
+      if ((mAccessRequest[Index].Segment == Segment) &&
+          (mAccessRequest[Index].SourceId.Uint16 == SourceId.Uint16) &&
+          (mAccessRequest[Index].BaseAddress == BaseAddress) &&
+          (mAccessRequest[Index].Length == Length) &&
+          (mAccessRequest[Index].IoMmuAccess != 0)) {
+        //
+        // Remove this record [Index] by sliding the tail entries down.
+        // No need to add the new record.
+        //
+        if (Index != mAccessRequestCount - 1) {
+          CopyMem (
+            &mAccessRequest[Index],
+            &mAccessRequest[Index + 1],
+            sizeof (VTD_ACCESS_REQUEST) * (mAccessRequestCount - 1 - Index)
+            );
+        }
+        ZeroMem (&mAccessRequest[mAccessRequestCount - 1], sizeof(VTD_ACCESS_REQUEST));
+        mAccessRequestCount--;
+        return EFI_SUCCESS;
+      }
+    }
+  }
+
+  //
+  // Grow the array in MAX_VTD_ACCESS_REQUEST-sized chunks when full.
+  //
+  if (mAccessRequestCount >= mAccessRequestMaxCount) {
+    NewAccessRequest = AllocateZeroPool (sizeof(*NewAccessRequest) * (mAccessRequestMaxCount + MAX_VTD_ACCESS_REQUEST));
+    if (NewAccessRequest == NULL) {
+      return EFI_OUT_OF_RESOURCES;
+    }
+    mAccessRequestMaxCount += MAX_VTD_ACCESS_REQUEST;
+    if (mAccessRequest != NULL) {
+      CopyMem (NewAccessRequest, mAccessRequest, sizeof(*NewAccessRequest) * mAccessRequestCount);
+      FreePool (mAccessRequest);
+    }
+    mAccessRequest = NewAccessRequest;
+  }
+
+  ASSERT (mAccessRequestCount < mAccessRequestMaxCount);
+
+  mAccessRequest[mAccessRequestCount].Segment     = Segment;
+  mAccessRequest[mAccessRequestCount].SourceId    = SourceId;
+  mAccessRequest[mAccessRequestCount].BaseAddress = BaseAddress;
+  mAccessRequest[mAccessRequestCount].Length      = Length;
+  mAccessRequest[mAccessRequestCount].IoMmuAccess = IoMmuAccess;
+
+  mAccessRequestCount++;
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Process Access Requests from before DMAR table is installed.
+
+  Replays every queued request through SetAccessAttribute(), then releases
+  the request array. Failures are logged and skipped (best effort).
+
+**/
+VOID
+ProcessRequestedAccessAttribute (
+  VOID
+  )
+{
+  UINTN       Index;
+  EFI_STATUS  Status;
+
+  DEBUG ((DEBUG_INFO, "ProcessRequestedAccessAttribute ...\n"));
+
+  for (Index = 0; Index < mAccessRequestCount; Index++) {
+    DEBUG ((
+      DEBUG_INFO,
+      "PCI(S%x.B%x.D%x.F%x) ",
+      mAccessRequest[Index].Segment,
+      mAccessRequest[Index].SourceId.Bits.Bus,
+      mAccessRequest[Index].SourceId.Bits.Device,
+      mAccessRequest[Index].SourceId.Bits.Function
+      ));
+    DEBUG ((
+      DEBUG_INFO,
+      "(0x%lx~0x%lx) - %lx\n",
+      mAccessRequest[Index].BaseAddress,
+      mAccessRequest[Index].Length,
+      mAccessRequest[Index].IoMmuAccess
+      ));
+    Status = SetAccessAttribute (
+               mAccessRequest[Index].Segment,
+               mAccessRequest[Index].SourceId,
+               mAccessRequest[Index].BaseAddress,
+               mAccessRequest[Index].Length,
+               mAccessRequest[Index].IoMmuAccess
+               );
+    if (EFI_ERROR (Status)) {
+      //
+      // Original message ended with a dangling ": " and no newline,
+      // which corrupted the debug log; terminate the line properly.
+      //
+      DEBUG ((DEBUG_ERROR, "SetAccessAttribute %r\n", Status));
+    }
+  }
+
+  //
+  // All queued requests are consumed - reset the queue to empty.
+  //
+  if (mAccessRequest != NULL) {
+    FreePool (mAccessRequest);
+  }
+  mAccessRequest = NULL;
+  mAccessRequestCount = 0;
+  mAccessRequestMaxCount = 0;
+
+  DEBUG ((DEBUG_INFO, "ProcessRequestedAccessAttribute Done\n"));
+}
+
+/**
+  Return UEFI memory map information.
+
+  @param[out] Below4GMemoryLimit  The below 4GiB memory limit address or 0 if insufficient resources exist to
+                                  determine the address.
+  @param[out] Above4GMemoryLimit  The above 4GiB memory limit address or 0 if insufficient resources exist to
+                                  determine the address.
+
+**/
+VOID
+ReturnUefiMemoryMap (
+  OUT UINT64  *Below4GMemoryLimit,
+  OUT UINT64  *Above4GMemoryLimit
+  )
+{
+  EFI_STATUS             Status;
+  EFI_MEMORY_DESCRIPTOR  *EfiMemoryMap;
+  EFI_MEMORY_DESCRIPTOR  *EfiMemoryMapEnd;
+  EFI_MEMORY_DESCRIPTOR  *EfiEntry;
+  EFI_MEMORY_DESCRIPTOR  *NextEfiEntry;
+  EFI_MEMORY_DESCRIPTOR  TempEfiEntry;
+  UINTN                  EfiMemoryMapSize;
+  UINTN                  EfiMapKey;
+  UINTN                  EfiDescriptorSize;
+  UINT32                 EfiDescriptorVersion;
+  UINT64                 MemoryBlockLength;
+
+  *Below4GMemoryLimit = 0;
+  *Above4GMemoryLimit = 0;
+
+  //
+  // Get the EFI memory map size with a zero-length probe call.
+  //
+  EfiMemoryMapSize = 0;
+  EfiMemoryMap     = NULL;
+  Status = gBS->GetMemoryMap (
+                  &EfiMemoryMapSize,
+                  EfiMemoryMap,
+                  &EfiMapKey,
+                  &EfiDescriptorSize,
+                  &EfiDescriptorVersion
+                  );
+  ASSERT (Status == EFI_BUFFER_TOO_SMALL);
+
+  do {
+    //
+    // Allocate the size returned back for the map buffer. The map can grow
+    // between the probe and the real call (the allocation itself may add
+    // descriptors), so retry while GetMemoryMap reports EFI_BUFFER_TOO_SMALL.
+    //
+    EfiMemoryMap = (EFI_MEMORY_DESCRIPTOR *) AllocatePool (EfiMemoryMapSize);
+    if (EfiMemoryMap == NULL) {
+      ASSERT (EfiMemoryMap != NULL);
+      return;
+    }
+
+    Status = gBS->GetMemoryMap (
+                    &EfiMemoryMapSize,
+                    EfiMemoryMap,
+                    &EfiMapKey,
+                    &EfiDescriptorSize,
+                    &EfiDescriptorVersion
+                    );
+    if (EFI_ERROR (Status)) {
+      FreePool (EfiMemoryMap);
+      //
+      // NULL the pointer so a terminal (non-BUFFER_TOO_SMALL) failure cannot
+      // use the freed buffer below when ASSERT is compiled out.
+      //
+      EfiMemoryMap = NULL;
+    }
+  } while (Status == EFI_BUFFER_TOO_SMALL);
+  ASSERT_EFI_ERROR (Status);
+  if (EfiMemoryMap == NULL) {
+    return ;
+  }
+
+  //
+  // Sort memory map from low to high (simple exchange sort by PhysicalStart).
+  //
+  EfiEntry        = EfiMemoryMap;
+  NextEfiEntry    = NEXT_MEMORY_DESCRIPTOR (EfiEntry, EfiDescriptorSize);
+  EfiMemoryMapEnd = (EFI_MEMORY_DESCRIPTOR *) ((UINT8 *) EfiMemoryMap + EfiMemoryMapSize);
+  while (EfiEntry < EfiMemoryMapEnd) {
+    while (NextEfiEntry < EfiMemoryMapEnd) {
+      if (EfiEntry->PhysicalStart > NextEfiEntry->PhysicalStart) {
+        CopyMem (&TempEfiEntry, EfiEntry, sizeof (EFI_MEMORY_DESCRIPTOR));
+        CopyMem (EfiEntry, NextEfiEntry, sizeof (EFI_MEMORY_DESCRIPTOR));
+        CopyMem (NextEfiEntry, &TempEfiEntry, sizeof (EFI_MEMORY_DESCRIPTOR));
+      }
+
+      NextEfiEntry = NEXT_MEMORY_DESCRIPTOR (NextEfiEntry, EfiDescriptorSize);
+    }
+
+    EfiEntry     = NEXT_MEMORY_DESCRIPTOR (EfiEntry, EfiDescriptorSize);
+    NextEfiEntry = NEXT_MEMORY_DESCRIPTOR (EfiEntry, EfiDescriptorSize);
+  }
+
+  //
+  // Scan the sorted map and track the highest end address of DMA-relevant
+  // memory below and above 4GiB. Blocks entirely under 1MiB are ignored.
+  //
+  DEBUG ((DEBUG_INFO, "MemoryMap:\n"));
+  EfiEntry        = EfiMemoryMap;
+  EfiMemoryMapEnd = (EFI_MEMORY_DESCRIPTOR *) ((UINT8 *) EfiMemoryMap + EfiMemoryMapSize);
+  while (EfiEntry < EfiMemoryMapEnd) {
+    MemoryBlockLength = (UINT64) (LShiftU64 (EfiEntry->NumberOfPages, 12));
+    DEBUG ((DEBUG_INFO, "Entry(0x%02x) 0x%016lx - 0x%016lx\n", EfiEntry->Type, EfiEntry->PhysicalStart, EfiEntry->PhysicalStart + MemoryBlockLength));
+    switch (EfiEntry->Type) {
+    case EfiLoaderCode:
+    case EfiLoaderData:
+    case EfiBootServicesCode:
+    case EfiBootServicesData:
+    case EfiConventionalMemory:
+    case EfiRuntimeServicesCode:
+    case EfiRuntimeServicesData:
+    case EfiACPIReclaimMemory:
+    case EfiACPIMemoryNVS:
+    case EfiReservedMemoryType:
+      if ((EfiEntry->PhysicalStart + MemoryBlockLength) <= BASE_1MB) {
+        //
+        // Skip the memory block is under 1MB
+        //
+      } else if (EfiEntry->PhysicalStart >= BASE_4GB) {
+        if (*Above4GMemoryLimit < EfiEntry->PhysicalStart + MemoryBlockLength) {
+          *Above4GMemoryLimit = EfiEntry->PhysicalStart + MemoryBlockLength;
+        }
+      } else {
+        if (*Below4GMemoryLimit < EfiEntry->PhysicalStart + MemoryBlockLength) {
+          *Below4GMemoryLimit = EfiEntry->PhysicalStart + MemoryBlockLength;
+        }
+      }
+      break;
+    }
+    EfiEntry = NEXT_MEMORY_DESCRIPTOR (EfiEntry, EfiDescriptorSize);
+  }
+
+  FreePool (EfiMemoryMap);
+
+  DEBUG ((DEBUG_INFO, "Result:\n"));
+  DEBUG ((DEBUG_INFO, "Below4GMemoryLimit: 0x%016lx\n", *Below4GMemoryLimit));
+  DEBUG ((DEBUG_INFO, "Above4GMemoryLimit: 0x%016lx\n", *Above4GMemoryLimit));
+
+  return ;
+}
+
+/**
+  The scan bus callback function to always enable page attribute.
+
+  Invoked by ScanPciBus() for each discovered device; builds the source ID
+  and delegates to AlwaysEnablePageAttribute().
+
+  @param[in]  Context   The context of the callback.
+  @param[in]  Segment   The segment of the source.
+  @param[in]  Bus       The bus of the source.
+  @param[in]  Device    The device of the source.
+  @param[in]  Function  The function of the source.
+
+  @retval EFI_SUCCESS  The VTd entry is updated to always enable all DMA access for the specific device.
+**/
+EFI_STATUS
+EFIAPI
+ScanBusCallbackAlwaysEnablePageAttribute (
+  IN VOID    *Context,
+  IN UINT16  Segment,
+  IN UINT8   Bus,
+  IN UINT8   Device,
+  IN UINT8   Function
+  )
+{
+  VTD_SOURCE_ID  SourceId;
+  EFI_STATUS     Status;
+
+  //
+  // Bus/Device/Function bit-fields fully populate the 16-bit source ID.
+  //
+  SourceId.Bits.Bus      = Bus;
+  SourceId.Bits.Device   = Device;
+  SourceId.Bits.Function = Function;
+  Status = AlwaysEnablePageAttribute (Segment, SourceId);
+  return Status;
+}
+
+/**
+  Always enable the VTd page attribute for the device in the DeviceScope.
+
+  @param[in]  DeviceScope  the input device scope data structure
+
+  @retval EFI_SUCCESS  The VTd entry is updated to always enable all DMA access for the specific device in the device scope.
+**/
+EFI_STATUS
+AlwaysEnablePageAttributeDeviceScope (
+  IN  EDKII_PLATFORM_VTD_DEVICE_SCOPE  *DeviceScope
+  )
+{
+  UINT8          Bus;
+  UINT8          Device;
+  UINT8          Function;
+  VTD_SOURCE_ID  SourceId;
+  UINT8          SecondaryBusNumber;
+  EFI_STATUS     Status;
+
+  Status = GetPciBusDeviceFunction (DeviceScope->SegmentNumber, &DeviceScope->DeviceScope, &Bus, &Device, &Function);
+  //
+  // The original code ignored this status and could go on to use
+  // uninitialized Bus/Device/Function; bail out on failure instead.
+  //
+  if (EFI_ERROR (Status)) {
+    return Status;
+  }
+
+  if (DeviceScope->DeviceScope.Type == EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_BRIDGE) {
+    //
+    // Need scan the bridge and add all devices.
+    //
+    SecondaryBusNumber = PciSegmentRead8 (PCI_SEGMENT_LIB_ADDRESS(DeviceScope->SegmentNumber, Bus, Device, Function, PCI_BRIDGE_SECONDARY_BUS_REGISTER_OFFSET));
+    Status = ScanPciBus (NULL, DeviceScope->SegmentNumber, SecondaryBusNumber, ScanBusCallbackAlwaysEnablePageAttribute);
+    return Status;
+  } else {
+    SourceId.Bits.Bus      = Bus;
+    SourceId.Bits.Device   = Device;
+    SourceId.Bits.Function = Function;
+    Status = AlwaysEnablePageAttribute (DeviceScope->SegmentNumber, SourceId);
+    return Status;
+  }
+}
+
+/**
+  Always enable the VTd page attribute for the device matching DeviceId.
+
+  Walks every discovered PCI device on every VTd unit; wildcard fields
+  (0xFFFF for IDs, 0xFF for revision) match any value.
+
+  @param[in]  PciDeviceId  the input PCI device ID
+
+  @retval EFI_SUCCESS  The VTd entry is updated to always enable all DMA access for the specific device matching DeviceId.
+**/
+EFI_STATUS
+AlwaysEnablePageAttributePciDeviceId (
+  IN  EDKII_PLATFORM_VTD_PCI_DEVICE_ID  *PciDeviceId
+  )
+{
+  UINTN            VtdIndex;
+  UINTN            PciIndex;
+  PCI_DEVICE_DATA  *PciDeviceData;
+  EFI_STATUS       Status;
+
+  for (VtdIndex = 0; VtdIndex < mVtdUnitNumber; VtdIndex++) {
+    for (PciIndex = 0; PciIndex < mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceDataNumber; PciIndex++) {
+      PciDeviceData = &mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceData[PciIndex];
+
+      if (((PciDeviceId->VendorId == 0xFFFF) || (PciDeviceId->VendorId == PciDeviceData->PciDeviceId.VendorId)) &&
+          ((PciDeviceId->DeviceId == 0xFFFF) || (PciDeviceId->DeviceId == PciDeviceData->PciDeviceId.DeviceId)) &&
+          ((PciDeviceId->RevisionId == 0xFF) || (PciDeviceId->RevisionId == PciDeviceData->PciDeviceId.RevisionId)) &&
+          ((PciDeviceId->SubsystemVendorId == 0xFFFF) || (PciDeviceId->SubsystemVendorId == PciDeviceData->PciDeviceId.SubsystemVendorId)) &&
+          ((PciDeviceId->SubsystemDeviceId == 0xFFFF) || (PciDeviceId->SubsystemDeviceId == PciDeviceData->PciDeviceId.SubsystemDeviceId)) ) {
+        //
+        // Best effort: a failure for one device does not stop the scan.
+        //
+        Status = AlwaysEnablePageAttribute (mVtdUnitInformation[VtdIndex].Segment, PciDeviceData->PciSourceId);
+        if (EFI_ERROR(Status)) {
+          continue;
+        }
+      }
+    }
+  }
+  return EFI_SUCCESS;
+}
+
+/**
+  Always enable the VTd page attribute for the device described by DeviceInfo.
+
+  The payload immediately following the DeviceInfo header is interpreted
+  according to DeviceInfo->Type.
+
+  @param[in] DeviceInfo the exception device information
+
+  @retval EFI_SUCCESS The VTd entry is updated to always enable all DMA access for the specific device in the device info.
+  @retval EFI_UNSUPPORTED The device information type is not recognized.
+**/
+EFI_STATUS
+AlwaysEnablePageAttributeExceptionDeviceInfo (
+  IN EDKII_PLATFORM_VTD_EXCEPTION_DEVICE_INFO *DeviceInfo
+  )
+{
+  VOID  *Payload;
+
+  //
+  // The type-specific record immediately follows the common header.
+  //
+  Payload = (VOID *)(DeviceInfo + 1);
+
+  if (DeviceInfo->Type == EDKII_PLATFORM_VTD_EXCEPTION_DEVICE_INFO_TYPE_DEVICE_SCOPE) {
+    return AlwaysEnablePageAttributeDeviceScope (Payload);
+  }
+  if (DeviceInfo->Type == EDKII_PLATFORM_VTD_EXCEPTION_DEVICE_INFO_TYPE_PCI_DEVICE_ID) {
+    return AlwaysEnablePageAttributePciDeviceId (Payload);
+  }
+  return EFI_UNSUPPORTED;
+}
+
+/**
+  Initialize platform VTd policy.
+
+  The platform VTd policy protocol is optional. When present, its exception
+  device list is walked and each listed device is granted always-enabled DMA
+  access via AlwaysEnablePageAttributeExceptionDeviceInfo().
+**/
+VOID
+InitializePlatformVTdPolicy (
+  VOID
+  )
+{
+  EFI_STATUS                                Status;
+  UINTN                                     DeviceInfoCount;
+  VOID                                      *DeviceInfo;
+  EDKII_PLATFORM_VTD_EXCEPTION_DEVICE_INFO  *ThisDeviceInfo;
+  UINTN                                     Index;
+
+  //
+  // The protocol is optional - quietly skip when it is not installed.
+  //
+  Status = gBS->LocateProtocol (
+                  &gEdkiiPlatformVTdPolicyProtocolGuid,
+                  NULL,
+                  (VOID **)&mPlatformVTdPolicy
+                  );
+  if (EFI_ERROR (Status)) {
+    return;
+  }
+
+  DEBUG ((DEBUG_INFO, "InitializePlatformVTdPolicy\n"));
+  Status = mPlatformVTdPolicy->GetExceptionDeviceList (mPlatformVTdPolicy, &DeviceInfoCount, &DeviceInfo);
+  if (EFI_ERROR (Status)) {
+    return;
+  }
+
+  //
+  // Walk the variable-length records; an END record terminates early.
+  //
+  ThisDeviceInfo = DeviceInfo;
+  for (Index = 0; Index < DeviceInfoCount; Index++) {
+    if (ThisDeviceInfo->Type == EDKII_PLATFORM_VTD_EXCEPTION_DEVICE_INFO_TYPE_END) {
+      break;
+    }
+    AlwaysEnablePageAttributeExceptionDeviceInfo (ThisDeviceInfo);
+    ThisDeviceInfo = (VOID *)((UINTN)ThisDeviceInfo + ThisDeviceInfo->Length);
+  }
+  FreePool (DeviceInfo);
+}
+
+/**
+  Setup VTd engine.
+
+  Runs once the DMAR ACPI table has been found: caches the UEFI memory map
+  limits, parses the DMAR DRHD structures, builds the translation tables,
+  applies the optional platform exception-device policy, parses RMRR, and
+  finally enables DMAR translation. Any failure aborts the sequence and
+  leaves DMAR disabled.
+**/
+VOID
+SetupVtd (
+ VOID
+ )
+{
+ EFI_STATUS Status;
+ VOID *PciEnumerationComplete;
+ UINTN Index;
+ UINT64 Below4GMemoryLimit;
+ UINT64 Above4GMemoryLimit;
+ VTD_ROOT_TABLE_INFO RootTableInfo;
+
+ //
+ // PCI Enumeration must be done
+ //
+ Status = gBS->LocateProtocol (
+ &gEfiPciEnumerationCompleteProtocolGuid,
+ NULL,
+ &PciEnumerationComplete
+ );
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Cache the memory limits in module globals; the below-4G limit is rounded
+ // up to a 256MB boundary first.
+ //
+ ReturnUefiMemoryMap (&Below4GMemoryLimit, &Above4GMemoryLimit);
+ Below4GMemoryLimit = ALIGN_VALUE_UP(Below4GMemoryLimit, SIZE_256MB);
+ DEBUG ((DEBUG_INFO, " Adjusted Below4GMemoryLimit: 0x%016lx\n", Below4GMemoryLimit));
+
+ mBelow4GMemoryLimit = Below4GMemoryLimit;
+ mAbove4GMemoryLimit = Above4GMemoryLimit;
+
+ VTdLogAddEvent (VTDLOG_DXE_SETUP_VTD, Below4GMemoryLimit, Above4GMemoryLimit);
+
+ //
+ // 1. setup
+ //
+ DEBUG ((DEBUG_INFO, "ParseDmarAcpiTable\n"));
+ Status = ParseDmarAcpiTableDrhd ();
+ if (EFI_ERROR (Status)) {
+ // Abort: DMAR stays disabled.
+ return;
+ }
+
+ DEBUG ((DEBUG_INFO, "PrepareVtdConfig\n"));
+ PrepareVtdConfig ();
+
+ //
+ // 2. initialization
+ //
+ DEBUG ((DEBUG_INFO, "SetupTranslationTable\n"));
+ Status = SetupTranslationTable ();
+ if (EFI_ERROR (Status)) {
+ // Abort: DMAR stays disabled.
+ return;
+ }
+
+ InitializePlatformVTdPolicy ();
+
+ ParseDmarAcpiTableRmrr ();
+
+ if ((PcdGet8 (PcdVTdPolicyPropertyMask) & BIT2) == 0) {
+ //
+ // Support IOMMU access attribute request recording before DMAR table is installed.
+ // Here is to process the requests.
+ //
+ ProcessRequestedAccessAttribute ();
+ }
+
+ //
+ // Dump the context tables built for each engine and log the root table
+ // location (Param 1 = extended root table, Param 0 = root table).
+ //
+ for (Index = 0; Index < mVtdUnitNumber; Index++) {
+ DEBUG ((DEBUG_INFO,"VTD Unit %d (Segment: %04x)\n", Index, mVtdUnitInformation[Index].Segment));
+
+ if (mVtdUnitInformation[Index].ExtRootEntryTable != NULL) {
+ VtdLibDumpDmarExtContextEntryTable (NULL, NULL, mVtdUnitInformation[Index].ExtRootEntryTable, mVtdUnitInformation[Index].Is5LevelPaging);
+
+ RootTableInfo.BaseAddress = mVtdUnitInformation[Index].VtdUnitBaseAddress;
+ RootTableInfo.TableAddress = (UINT64) (UINTN) mVtdUnitInformation[Index].RootEntryTable;
+ RootTableInfo.Is5LevelPaging = mVtdUnitInformation[Index].Is5LevelPaging;
+ VTdLogAddDataEvent (VTDLOG_DXE_ROOT_TABLE, 1, &RootTableInfo, sizeof (VTD_ROOT_TABLE_INFO));
+ }
+
+ if (mVtdUnitInformation[Index].RootEntryTable != NULL) {
+ VtdLibDumpDmarContextEntryTable (NULL, NULL, mVtdUnitInformation[Index].RootEntryTable, mVtdUnitInformation[Index].Is5LevelPaging);
+
+ RootTableInfo.BaseAddress = mVtdUnitInformation[Index].VtdUnitBaseAddress;
+ RootTableInfo.TableAddress = (UINT64) (UINTN) mVtdUnitInformation[Index].RootEntryTable;
+ RootTableInfo.Is5LevelPaging = mVtdUnitInformation[Index].Is5LevelPaging;
+ VTdLogAddDataEvent (VTDLOG_DXE_ROOT_TABLE, 0, &RootTableInfo, sizeof (VTD_ROOT_TABLE_INFO));
+ }
+ }
+
+ //
+ // 3. enable
+ //
+ DEBUG ((DEBUG_INFO, "EnableDmar\n"));
+ Status = EnableDmar ();
+ if (EFI_ERROR (Status)) {
+ return;
+ }
+ DEBUG ((DEBUG_INFO, "DumpVtdRegs\n"));
+ DumpVtdRegsAll ();
+}
+
+/**
+  Notification function of ACPI Table change.
+
+  This is a notification function registered on ACPI Table change event.
+  When the DMAR ACPI table becomes available the VTd engines are set up and
+  the event is closed; it is also closed if the table was already processed.
+
+  @param Event Event whose notification function is being invoked.
+  @param Context Pointer to the notification function's context.
+
+**/
+VOID
+EFIAPI
+AcpiNotificationFunc (
+  IN EFI_EVENT  Event,
+  IN VOID       *Context
+  )
+{
+  EFI_STATUS  Status;
+
+  Status = GetDmarAcpiTable ();
+  if (!EFI_ERROR (Status)) {
+    //
+    // DMAR table newly found: bring up the VTd engines and stop listening.
+    //
+    SetupVtd ();
+    gBS->CloseEvent (Event);
+    return;
+  }
+
+  if (Status == EFI_ALREADY_STARTED) {
+    //
+    // Table already handled by an earlier notification; nothing more to do.
+    //
+    gBS->CloseEvent (Event);
+  }
+}
+
+/**
+  Exit boot service callback function.
+
+  Unless PcdVTdPolicyPropertyMask BIT1 is set, invalidates all caches and
+  disables DMAR translation before the OS takes over.
+
+  @param[in] Event The event handle.
+  @param[in] Context The event content.
+**/
+VOID
+EFIAPI
+OnExitBootServices (
+  IN EFI_EVENT  Event,
+  IN VOID       *Context
+  )
+{
+  UINTN  Index;
+
+  DEBUG ((DEBUG_INFO, "Vtd OnExitBootServices\n"));
+
+  if ((PcdGet8 (PcdVTdPolicyPropertyMask) & BIT1) != 0) {
+    //
+    // BIT1 set: leave DMAR translation enabled across ExitBootServices.
+    //
+    return;
+  }
+
+  DumpVtdRegsAll ();
+
+  DEBUG ((DEBUG_INFO, "Invalidate all\n"));
+  for (Index = 0; Index < mVtdUnitNumber; Index++) {
+    VtdLibFlushWriteBuffer (mVtdUnitInformation[Index].VtdUnitBaseAddress);
+    InvalidateContextCache (Index);
+    InvalidateIOTLB (Index);
+  }
+
+  DisableDmar ();
+  DumpVtdRegsAll ();
+}
+
+/**
+  Legacy boot callback function.
+
+  Disables DMAR translation before handing control to a legacy OS; registers
+  are dumped before and after for debugging.
+
+  @param[in] Event The event handle.
+  @param[in] Context The event content.
+**/
+VOID
+EFIAPI
+OnLegacyBoot (
+  IN EFI_EVENT  Event,
+  IN VOID       *Context
+  )
+{
+  DEBUG ((DEBUG_INFO, "Vtd OnLegacyBoot\n"));
+  DumpVtdRegsAll ();
+  DisableDmar ();
+  DumpVtdRegsAll ();
+}
+
+/**
+  Initialize DMA protection.
+
+  Registers ACPI table change notifications (signalled once immediately, in
+  case the DMAR table is already installed), plus ExitBootServices and legacy
+  boot callbacks that tear DMAR down before handing off the platform.
+**/
+VOID
+InitializeDmaProtection (
+  VOID
+  )
+{
+  EFI_STATUS  Status;
+  EFI_EVENT   ExitBootServicesEvent;
+  EFI_EVENT   LegacyBootEvent;
+  EFI_EVENT   AcpiEvents[2];
+  EFI_GUID    *AcpiGuids[2];
+  UINTN       Index;
+
+  //
+  // Watch for the DMAR table arriving via either the ACPI 1.0 or 2.0 table.
+  //
+  AcpiGuids[0] = &gEfiAcpi10TableGuid;
+  AcpiGuids[1] = &gEfiAcpi20TableGuid;
+  for (Index = 0; Index < 2; Index++) {
+    Status = gBS->CreateEventEx (
+                    EVT_NOTIFY_SIGNAL,
+                    VTD_TPL_LEVEL,
+                    AcpiNotificationFunc,
+                    NULL,
+                    AcpiGuids[Index],
+                    &AcpiEvents[Index]
+                    );
+    ASSERT_EFI_ERROR (Status);
+  }
+
+  //
+  // Signal the events initially for the case
+  // that DMAR table has been installed.
+  // ACPI 2.0 first, then ACPI 1.0, matching the original probe order.
+  //
+  gBS->SignalEvent (AcpiEvents[1]);
+  gBS->SignalEvent (AcpiEvents[0]);
+
+  Status = gBS->CreateEventEx (
+                  EVT_NOTIFY_SIGNAL,
+                  TPL_CALLBACK,
+                  OnExitBootServices,
+                  NULL,
+                  &gEfiEventExitBootServicesGuid,
+                  &ExitBootServicesEvent
+                  );
+  ASSERT_EFI_ERROR (Status);
+
+  Status = EfiCreateEventLegacyBootEx (
+             TPL_CALLBACK,
+             OnLegacyBoot,
+             NULL,
+             &LegacyBootEvent
+             );
+  ASSERT_EFI_ERROR (Status);
+}
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmaProtection.h b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmaProtection.h
new file mode 100644
index 000000000..5cd59d18e
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmaProtection.h
@@ -0,0 +1,658 @@
+/** @file
+
+ Copyright (c) 2017 - 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef _DMAR_PROTECTION_H_
+#define _DMAR_PROTECTION_H_
+
+#include <Uefi.h>
+#include <PiDxe.h>
+
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/UefiBootServicesTableLib.h>
+#include <Library/IoLib.h>
+#include <Library/PciSegmentLib.h>
+#include <Library/DebugLib.h>
+#include <Library/UefiLib.h>
+#include <Library/CacheMaintenanceLib.h>
+#include <Library/PerformanceLib.h>
+#include <Library/PrintLib.h>
+#include <Library/ReportStatusCodeLib.h>
+#include <Library/HobLib.h>
+
+#include <Guid/EventGroup.h>
+#include <Guid/Acpi.h>
+
+#include <Protocol/VtdLog.h>
+#include <Protocol/DxeSmmReadyToLock.h>
+#include <Protocol/PciRootBridgeIo.h>
+#include <Protocol/PciIo.h>
+#include <Protocol/PciEnumerationComplete.h>
+#include <Protocol/PlatformVtdPolicy.h>
+#include <Protocol/IoMmu.h>
+#include <Protocol/PciRootBridgeIo.h>
+
+#include <IndustryStandard/Pci.h>
+#include <IndustryStandard/DmaRemappingReportingTable.h>
+#include <IndustryStandard/Vtd.h>
+
+#include <Library/IntelVTdPeiDxeLib.h>
+
+#define VTD_64BITS_ADDRESS(Lo, Hi) (LShiftU64 (Lo, 12) | LShiftU64 (Hi, 32))
+
+#define ALIGN_VALUE_UP(Value, Alignment) (((Value) + (Alignment) - 1) & (~((Alignment) - 1)))
+#define ALIGN_VALUE_LOW(Value, Alignment) ((Value) & (~((Alignment) - 1)))
+
+#define VTD_TPL_LEVEL TPL_NOTIFY
+
+//
+// Use 256-bit descriptor
+// Queue size is 128.
+//
+#define VTD_QUEUED_INVALIDATION_DESCRIPTOR_WIDTH 1
+#define VTD_INVALIDATION_QUEUE_SIZE 0
+
+//
+// This is the initial max PCI DATA number.
+// The number may be enlarged later.
+//
+#define MAX_VTD_PCI_DATA_NUMBER 0x100
+
+//
+// Per-engine state tracked for one VTd (DRHD) unit.
+//
+typedef struct {
+ // Base address of the VTd engine's register block (passed to DumpVtdRegs /
+ // VtdLibFlushWriteBuffer and logged as the root-table BaseAddress).
+ UINTN VtdUnitBaseAddress;
+ // PCI segment number this engine covers.
+ UINT16 Segment;
+ // Cached copies of the version / capability / extended capability registers.
+ VTD_VER_REG VerReg;
+ VTD_CAP_REG CapReg;
+ VTD_ECAP_REG ECapReg;
+ // Root entry table and extended root entry table; dumped and logged in
+ // SetupVtd() when non-NULL.
+ VTD_ROOT_ENTRY *RootEntryTable;
+ VTD_EXT_ROOT_ENTRY *ExtRootEntryTable;
+ // Presumably a pre-built second-level paging table shared by devices --
+ // TODO confirm at the use sites in TranslationTable.c.
+ VTD_SECOND_LEVEL_PAGING_ENTRY *FixedSecondLevelPagingEntry;
+ // NOTE(review): dirty flags, presumably tracking pending context/page
+ // invalidations -- confirm against the invalidation code.
+ BOOLEAN HasDirtyContext;
+ BOOLEAN HasDirtyPages;
+ // PCI devices registered to this engine (see
+ // AlwaysEnablePageAttributePciDeviceId for the iteration pattern).
+ PCI_DEVICE_INFORMATION *PciDeviceInfo;
+ // Whether the translation tables use 5-level paging (passed to the
+ // VtdLibDumpDmarContextEntryTable helpers).
+ BOOLEAN Is5LevelPaging;
+ // Queued-invalidation state; descriptor width/queue size constants are
+ // defined above. NOTE(review): exact semantics of EnableQueuedInvalidation
+ // values not visible here -- confirm in VtdReg.c.
+ UINT8 EnableQueuedInvalidation;
+ VOID *QiDescBuffer;
+ UINTN QiDescBufferSize;
+} VTD_UNIT_INFORMATION;
+
+//
+// This is the initial max ACCESS request.
+// The number may be enlarged later.
+//
+#define MAX_VTD_ACCESS_REQUEST 0x100
+
+//
+// A recorded DMA access-attribute request; fields mirror the parameters of
+// RequestAccessAttribute() so requests made before the DMAR table is
+// installed can be replayed later (see ProcessRequestedAccessAttribute).
+//
+typedef struct {
+ UINT16 Segment;
+ VTD_SOURCE_ID SourceId;
+ UINT64 BaseAddress;
+ UINT64 Length;
+ UINT64 IoMmuAccess;
+} VTD_ACCESS_REQUEST;
+
+
+/**
+ The scan bus callback function.
+
+ It is called in PCI bus scan for each PCI device under the bus.
+
+ @param[in] Context The context of the callback.
+ @param[in] Segment The segment of the source.
+ @param[in] Bus The bus of the source.
+ @param[in] Device The device of the source.
+ @param[in] Function The function of the source.
+
+ @retval EFI_SUCCESS The specific PCI device is processed in the callback.
+**/
+typedef
+EFI_STATUS
+(EFIAPI *SCAN_BUS_FUNC_CALLBACK_FUNC) (
+ IN VOID *Context,
+ IN UINT16 Segment,
+ IN UINT8 Bus,
+ IN UINT8 Device,
+ IN UINT8 Function
+ );
+
+extern EFI_ACPI_DMAR_HEADER *mAcpiDmarTable;
+
+extern UINTN mVtdUnitNumber;
+extern VTD_UNIT_INFORMATION *mVtdUnitInformation;
+
+extern UINT64 mBelow4GMemoryLimit;
+extern UINT64 mAbove4GMemoryLimit;
+
+extern EDKII_PLATFORM_VTD_POLICY_PROTOCOL *mPlatformVTdPolicy;
+
+/**
+ Prepare VTD configuration.
+**/
+VOID
+PrepareVtdConfig (
+ VOID
+ );
+
+/**
+ Setup VTd translation table.
+
+ @retval EFI_SUCCESS Setup translation table successfully.
+ @retval EFI_OUT_OF_RESOURCE Setup translation table fail.
+**/
+EFI_STATUS
+SetupTranslationTable (
+ VOID
+ );
+
+/**
+ Enable DMAR translation.
+
+ @retval EFI_SUCCESS DMAR translation is enabled.
+ @retval EFI_DEVICE_ERROR DMAR translation is not enabled.
+**/
+EFI_STATUS
+EnableDmar (
+ VOID
+ );
+
+/**
+ Disable DMAR translation.
+
+ @retval EFI_SUCCESS DMAR translation is disabled.
+ @retval EFI_DEVICE_ERROR DMAR translation is not disabled.
+**/
+EFI_STATUS
+DisableDmar (
+ VOID
+ );
+
+/**
+  Prepare the cache invalidation interface.
+
+ @param[in] VtdIndex The index used to identify a VTd engine.
+
+ @retval EFI_SUCCESS The operation was successful.
+ @retval EFI_UNSUPPORTED Invalidation method is not supported.
+ @retval EFI_OUT_OF_RESOURCES A memory allocation failed.
+**/
+EFI_STATUS
+PerpareCacheInvalidationInterface (
+ IN UINTN VtdIndex
+ );
+
+/**
+ Invalidate VTd context cache.
+
+ @param[in] VtdIndex The index used to identify a VTd engine.
+**/
+EFI_STATUS
+InvalidateContextCache (
+ IN UINTN VtdIndex
+ );
+
+/**
+ Invalidate VTd IOTLB.
+
+ @param[in] VtdIndex The index used to identify a VTd engine.
+**/
+EFI_STATUS
+InvalidateIOTLB (
+ IN UINTN VtdIndex
+ );
+
+/**
+  Invalidate VTd global IOTLB.
+
+ @param[in] VtdIndex The index of VTd engine.
+
+ @retval EFI_SUCCESS VTd global IOTLB is invalidated.
+ @retval EFI_DEVICE_ERROR VTd global IOTLB is not invalidated.
+**/
+EFI_STATUS
+InvalidateVtdIOTLBGlobal (
+ IN UINTN VtdIndex
+ );
+
+/**
+ Dump VTd registers.
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+**/
+VOID
+DumpVtdRegs (
+ IN UINTN VtdUnitBaseAddress
+ );
+
+/**
+ Dump VTd registers for all VTd engine.
+**/
+VOID
+DumpVtdRegsAll (
+ VOID
+ );
+
+/**
+ Dump VTd version registers.
+
+ @param[in] VerReg The version register.
+**/
+VOID
+DumpVtdVerRegs (
+ IN VTD_VER_REG *VerReg
+ );
+
+/**
+ Dump VTd capability registers.
+
+ @param[in] CapReg The capability register.
+**/
+VOID
+DumpVtdCapRegs (
+ IN VTD_CAP_REG *CapReg
+ );
+
+/**
+ Dump VTd extended capability registers.
+
+ @param[in] ECapReg The extended capability register.
+**/
+VOID
+DumpVtdECapRegs (
+ IN VTD_ECAP_REG *ECapReg
+ );
+
+/**
+ Register PCI device to VTd engine.
+
+ @param[in] VtdIndex The index of VTd engine.
+ @param[in] Segment The segment of the source.
+ @param[in] SourceId The SourceId of the source.
+ @param[in] DeviceType The DMAR device scope type.
+ @param[in] CheckExist TRUE: ERROR will be returned if the PCI device is already registered.
+ FALSE: SUCCESS will be returned if the PCI device is registered.
+
+ @retval EFI_SUCCESS The PCI device is registered.
+ @retval EFI_OUT_OF_RESOURCES No enough resource to register a new PCI device.
+ @retval EFI_ALREADY_STARTED The device is already registered.
+**/
+EFI_STATUS
+RegisterPciDevice (
+ IN UINTN VtdIndex,
+ IN UINT16 Segment,
+ IN VTD_SOURCE_ID SourceId,
+ IN UINT8 DeviceType,
+ IN BOOLEAN CheckExist
+ );
+
+/**
+ The scan bus callback function to always enable page attribute.
+
+ @param[in] Context The context of the callback.
+ @param[in] Segment The segment of the source.
+ @param[in] Bus The bus of the source.
+ @param[in] Device The device of the source.
+ @param[in] Function The function of the source.
+
+ @retval EFI_SUCCESS The VTd entry is updated to always enable all DMA access for the specific device.
+**/
+EFI_STATUS
+EFIAPI
+ScanBusCallbackRegisterPciDevice (
+ IN VOID *Context,
+ IN UINT16 Segment,
+ IN UINT8 Bus,
+ IN UINT8 Device,
+ IN UINT8 Function
+ );
+
+/**
+ Scan PCI bus and invoke callback function for each PCI devices under the bus.
+
+ @param[in] Context The context of the callback function.
+ @param[in] Segment The segment of the source.
+ @param[in] Bus The bus of the source.
+ @param[in] Callback The callback function in PCI scan.
+
+  @retval EFI_SUCCESS The PCI devices under the bus are scanned.
+**/
+EFI_STATUS
+ScanPciBus (
+ IN VOID *Context,
+ IN UINT16 Segment,
+ IN UINT8 Bus,
+ IN SCAN_BUS_FUNC_CALLBACK_FUNC Callback
+ );
+
+/**
+ Scan PCI bus and invoke callback function for each PCI devices under all root bus.
+
+ @param[in] Context The context of the callback function.
+ @param[in] Segment The segment of the source.
+ @param[in] Callback The callback function in PCI scan.
+
+  @retval EFI_SUCCESS The PCI devices under the bus are scanned.
+**/
+EFI_STATUS
+ScanAllPciBus (
+ IN VOID *Context,
+ IN UINT16 Segment,
+ IN SCAN_BUS_FUNC_CALLBACK_FUNC Callback
+ );
+
+/**
+ Find the VTd index by the Segment and SourceId.
+
+ @param[in] Segment The segment of the source.
+ @param[in] SourceId The SourceId of the source.
+ @param[out] ExtContextEntry The ExtContextEntry of the source.
+ @param[out] ContextEntry The ContextEntry of the source.
+
+ @return The index of the VTd engine.
+ @retval (UINTN)-1 The VTd engine is not found.
+**/
+UINTN
+FindVtdIndexByPciDevice (
+ IN UINT16 Segment,
+ IN VTD_SOURCE_ID SourceId,
+ OUT VTD_EXT_CONTEXT_ENTRY **ExtContextEntry,
+ OUT VTD_CONTEXT_ENTRY **ContextEntry
+ );
+
+/**
+ Get the DMAR ACPI table.
+
+ @retval EFI_SUCCESS The DMAR ACPI table is got.
+ @retval EFI_ALREADY_STARTED The DMAR ACPI table has been got previously.
+ @retval EFI_NOT_FOUND The DMAR ACPI table is not found.
+**/
+EFI_STATUS
+GetDmarAcpiTable (
+ VOID
+ );
+
+/**
+ Parse DMAR DRHD table.
+
+ @return EFI_SUCCESS The DMAR DRHD table is parsed.
+**/
+EFI_STATUS
+ParseDmarAcpiTableDrhd (
+ VOID
+ );
+
+/**
+ Parse DMAR RMRR table.
+
+ @return EFI_SUCCESS The DMAR RMRR table is parsed.
+**/
+EFI_STATUS
+ParseDmarAcpiTableRmrr (
+ VOID
+ );
+
+/**
+ Set VTd attribute for a system memory.
+
+ @param[in] VtdIndex The index used to identify a VTd engine.
+ @param[in] DomainIdentifier The domain ID of the source.
+ @param[in] SecondLevelPagingEntry The second level paging entry in VTd table for the device.
+ @param[in] BaseAddress The base of device memory address to be used as the DMA memory.
+ @param[in] Length The length of device memory address to be used as the DMA memory.
+ @param[in] IoMmuAccess The IOMMU access.
+
+ @retval EFI_SUCCESS The IoMmuAccess is set for the memory range specified by BaseAddress and Length.
+ @retval EFI_INVALID_PARAMETER BaseAddress is not IoMmu Page size aligned.
+ @retval EFI_INVALID_PARAMETER Length is not IoMmu Page size aligned.
+ @retval EFI_INVALID_PARAMETER Length is 0.
+ @retval EFI_INVALID_PARAMETER IoMmuAccess specified an illegal combination of access.
+ @retval EFI_UNSUPPORTED The bit mask of IoMmuAccess is not supported by the IOMMU.
+ @retval EFI_UNSUPPORTED The IOMMU does not support the memory range specified by BaseAddress and Length.
+ @retval EFI_OUT_OF_RESOURCES There are not enough resources available to modify the IOMMU access.
+ @retval EFI_DEVICE_ERROR The IOMMU device reported an error while attempting the operation.
+**/
+EFI_STATUS
+SetPageAttribute (
+ IN UINTN VtdIndex,
+ IN UINT16 DomainIdentifier,
+ IN VTD_SECOND_LEVEL_PAGING_ENTRY *SecondLevelPagingEntry,
+ IN UINT64 BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 IoMmuAccess
+ );
+
+/**
+ Set VTd attribute for a system memory.
+
+ @param[in] Segment The Segment used to identify a VTd engine.
+ @param[in] SourceId The SourceId used to identify a VTd engine and table entry.
+ @param[in] BaseAddress The base of device memory address to be used as the DMA memory.
+ @param[in] Length The length of device memory address to be used as the DMA memory.
+ @param[in] IoMmuAccess The IOMMU access.
+
+ @retval EFI_SUCCESS The IoMmuAccess is set for the memory range specified by BaseAddress and Length.
+ @retval EFI_INVALID_PARAMETER BaseAddress is not IoMmu Page size aligned.
+ @retval EFI_INVALID_PARAMETER Length is not IoMmu Page size aligned.
+ @retval EFI_INVALID_PARAMETER Length is 0.
+ @retval EFI_INVALID_PARAMETER IoMmuAccess specified an illegal combination of access.
+ @retval EFI_UNSUPPORTED The bit mask of IoMmuAccess is not supported by the IOMMU.
+ @retval EFI_UNSUPPORTED The IOMMU does not support the memory range specified by BaseAddress and Length.
+ @retval EFI_OUT_OF_RESOURCES There are not enough resources available to modify the IOMMU access.
+ @retval EFI_DEVICE_ERROR The IOMMU device reported an error while attempting the operation.
+**/
+EFI_STATUS
+SetAccessAttribute (
+ IN UINT16 Segment,
+ IN VTD_SOURCE_ID SourceId,
+ IN UINT64 BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 IoMmuAccess
+ );
+
+/**
+ Return the index of PCI data.
+
+ @param[in] VtdIndex The index used to identify a VTd engine.
+ @param[in] Segment The Segment used to identify a VTd engine.
+ @param[in] SourceId The SourceId used to identify a VTd engine and table entry.
+
+ @return The index of the PCI data.
+ @retval (UINTN)-1 The PCI data is not found.
+**/
+UINTN
+GetPciDataIndex (
+ IN UINTN VtdIndex,
+ IN UINT16 Segment,
+ IN VTD_SOURCE_ID SourceId
+ );
+
+/**
+ Dump VTd registers if there is error.
+**/
+VOID
+DumpVtdIfError (
+ VOID
+ );
+
+/**
+ Initialize platform VTd policy.
+**/
+VOID
+InitializePlatformVTdPolicy (
+ VOID
+ );
+
+/**
+ Always enable the VTd page attribute for the device.
+
+ @param[in] Segment The Segment used to identify a VTd engine.
+ @param[in] SourceId The SourceId used to identify a VTd engine and table entry.
+
+ @retval EFI_SUCCESS The VTd entry is updated to always enable all DMA access for the specific device.
+**/
+EFI_STATUS
+AlwaysEnablePageAttribute (
+ IN UINT16 Segment,
+ IN VTD_SOURCE_ID SourceId
+ );
+
+/**
+ Convert the DeviceHandle to SourceId and Segment.
+
+ @param[in] DeviceHandle The device who initiates the DMA access request.
+ @param[out] Segment The Segment used to identify a VTd engine.
+ @param[out] SourceId The SourceId used to identify a VTd engine and table entry.
+
+ @retval EFI_SUCCESS The Segment and SourceId are returned.
+ @retval EFI_INVALID_PARAMETER DeviceHandle is an invalid handle.
+ @retval EFI_UNSUPPORTED DeviceHandle is unknown by the IOMMU.
+**/
+EFI_STATUS
+DeviceHandleToSourceId (
+ IN EFI_HANDLE DeviceHandle,
+ OUT UINT16 *Segment,
+ OUT VTD_SOURCE_ID *SourceId
+ );
+
+/**
+ Get device information from mapping.
+
+ @param[in] Mapping The mapping.
+ @param[out] DeviceAddress The device address of the mapping.
+ @param[out] NumberOfPages The number of pages of the mapping.
+
+ @retval EFI_SUCCESS The device information is returned.
+ @retval EFI_INVALID_PARAMETER The mapping is invalid.
+**/
+EFI_STATUS
+GetDeviceInfoFromMapping (
+ IN VOID *Mapping,
+ OUT EFI_PHYSICAL_ADDRESS *DeviceAddress,
+ OUT UINTN *NumberOfPages
+ );
+
+/**
+ Initialize DMA protection.
+**/
+VOID
+InitializeDmaProtection (
+ VOID
+ );
+
+/**
+ Allocate zero pages.
+
+ @param[in] Pages the number of pages.
+
+ @return the page address.
+ @retval NULL No resource to allocate pages.
+**/
+VOID *
+EFIAPI
+AllocateZeroPages (
+ IN UINTN Pages
+ );
+
+/**
+ Flush VTD page table and context table memory.
+
+ This action is to make sure the IOMMU engine can get final data in memory.
+
+ @param[in] VtdIndex The index used to identify a VTd engine.
+ @param[in] Base The base address of memory to be flushed.
+ @param[in] Size The size of memory in bytes to be flushed.
+**/
+VOID
+FlushPageTableMemory (
+ IN UINTN VtdIndex,
+ IN UINTN Base,
+ IN UINTN Size
+ );
+
+/**
+ Get PCI device information from DMAR DevScopeEntry.
+
+ @param[in] Segment The segment number.
+ @param[in] DmarDevScopeEntry DMAR DevScopeEntry
+ @param[out] Bus The bus number.
+ @param[out] Device The device number.
+ @param[out] Function The function number.
+
+ @retval EFI_SUCCESS The PCI device information is returned.
+**/
+EFI_STATUS
+GetPciBusDeviceFunction (
+ IN UINT16 Segment,
+ IN EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *DmarDevScopeEntry,
+ OUT UINT8 *Bus,
+ OUT UINT8 *Device,
+ OUT UINT8 *Function
+ );
+
+/**
+ Append VTd Access Request to global.
+
+ @param[in] Segment The Segment used to identify a VTd engine.
+ @param[in] SourceId The SourceId used to identify a VTd engine and table entry.
+ @param[in] BaseAddress The base of device memory address to be used as the DMA memory.
+ @param[in] Length The length of device memory address to be used as the DMA memory.
+ @param[in] IoMmuAccess The IOMMU access.
+
+ @retval EFI_SUCCESS The IoMmuAccess is set for the memory range specified by BaseAddress and Length.
+ @retval EFI_INVALID_PARAMETER BaseAddress is not IoMmu Page size aligned.
+ @retval EFI_INVALID_PARAMETER Length is not IoMmu Page size aligned.
+ @retval EFI_INVALID_PARAMETER Length is 0.
+ @retval EFI_INVALID_PARAMETER IoMmuAccess specified an illegal combination of access.
+ @retval EFI_UNSUPPORTED The bit mask of IoMmuAccess is not supported by the IOMMU.
+ @retval EFI_UNSUPPORTED The IOMMU does not support the memory range specified by BaseAddress and Length.
+ @retval EFI_OUT_OF_RESOURCES There are not enough resources available to modify the IOMMU access.
+ @retval EFI_DEVICE_ERROR The IOMMU device reported an error while attempting the operation.
+
+**/
+EFI_STATUS
+RequestAccessAttribute (
+ IN UINT16 Segment,
+ IN VTD_SOURCE_ID SourceId,
+ IN UINT64 BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 IoMmuAccess
+ );
+
+/**
+ Add a new VTd log event.
+
+ @param[in] EventType Event type
+ @param[in] Data1 First parameter
+ @param[in] Data2 Second parameter
+
+**/
+VOID
+EFIAPI
+VTdLogAddEvent (
+ IN CONST VTDLOG_EVENT_TYPE EventType,
+ IN CONST UINT64 Data1,
+ IN CONST UINT64 Data2
+ );
+
+/**
+ Add a new VTd log event with data.
+
+ @param[in] EventType Event type
+ @param[in] Param parameter
+ @param[in] Data Data
+ @param[in] DataSize Data size
+
+**/
+VOID
+EFIAPI
+VTdLogAddDataEvent (
+ IN CONST VTDLOG_EVENT_TYPE EventType,
+ IN CONST UINT64 Param,
+ IN CONST VOID *Data,
+ IN CONST UINT32 DataSize
+ );
+
+#endif
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmarAcpiTable.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmarAcpiTable.c
new file mode 100644
index 000000000..21f559983
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmarAcpiTable.c
@@ -0,0 +1,398 @@
+/** @file
+
+ Copyright (c) 2017 - 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "DmaProtection.h"
+
+#pragma pack(1)
+
+typedef struct {
+ EFI_ACPI_DESCRIPTION_HEADER Header;
+ UINT32 Entry;
+} RSDT_TABLE;
+
+typedef struct {
+ EFI_ACPI_DESCRIPTION_HEADER Header;
+ UINT64 Entry;
+} XSDT_TABLE;
+
+#pragma pack()
+
+EFI_ACPI_DMAR_HEADER *mAcpiDmarTable = NULL;
+
+/**
+  Dump the cached DMAR ACPI table to the debug output and record the whole
+  table as a VTd log data event.
+**/
+VOID
+VtdDumpDmarTable (
+  VOID
+  )
+{
+  UINT32  DmarLength;
+
+  DmarLength = mAcpiDmarTable->Header.Length;
+
+  VtdLibDumpAcpiDmar (NULL, NULL, (EFI_ACPI_DMAR_HEADER *) (UINTN) mAcpiDmarTable);
+  VTdLogAddDataEvent (VTDLOG_DXE_DMAR_TABLE, DmarLength, (VOID *) mAcpiDmarTable, DmarLength);
+}
+
+/**
+  Get PCI device information from DMAR DevScopeEntry.
+
+  For PCI endpoint and PCI-PCI bridge entries, the device scope holds a path
+  of (Device, Function) pairs starting at StartBusNumber; each intermediate
+  element names a bridge whose secondary bus number is read from PCI config
+  space to resolve the final bus. Other entry types use the first path
+  element as-is.
+
+  @param[in]  Segment           The segment number.
+  @param[in]  DmarDevScopeEntry DMAR DevScopeEntry
+  @param[out] Bus               The bus number.
+  @param[out] Device            The device number.
+  @param[out] Function          The function number.
+
+  @retval EFI_SUCCESS  The PCI device information is returned.
+**/
+EFI_STATUS
+GetPciBusDeviceFunction (
+  IN UINT16 Segment,
+  IN EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *DmarDevScopeEntry,
+  OUT UINT8 *Bus,
+  OUT UINT8 *Device,
+  OUT UINT8 *Function
+  )
+{
+  EFI_ACPI_DMAR_PCI_PATH *DmarPciPath;
+  UINT8 MyBus;
+  UINT8 MyDevice;
+  UINT8 MyFunction;
+
+  //
+  // The PCI path array immediately follows the device scope header.
+  //
+  DmarPciPath = (EFI_ACPI_DMAR_PCI_PATH *)((UINTN)(DmarDevScopeEntry + 1));
+  MyBus = DmarDevScopeEntry->StartBusNumber;
+  MyDevice = DmarPciPath->Device;
+  MyFunction = DmarPciPath->Function;
+
+  switch (DmarDevScopeEntry->Type) {
+  case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_ENDPOINT:
+  case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_BRIDGE:
+    //
+    // While more path elements remain, the current (Bus, Device, Function)
+    // is a bridge: descend by reading its secondary bus number, then load
+    // the next (Device, Function) pair from the path.
+    //
+    while ((UINTN)DmarPciPath + sizeof(EFI_ACPI_DMAR_PCI_PATH) < (UINTN)DmarDevScopeEntry + DmarDevScopeEntry->Length) {
+      MyBus = PciSegmentRead8 (PCI_SEGMENT_LIB_ADDRESS(Segment, MyBus, MyDevice, MyFunction, PCI_BRIDGE_SECONDARY_BUS_REGISTER_OFFSET));
+      DmarPciPath ++;
+      MyDevice = DmarPciPath->Device;
+      MyFunction = DmarPciPath->Function;
+    }
+    break;
+  case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_IOAPIC:
+  case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_MSI_CAPABLE_HPET:
+  case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_ACPI_NAMESPACE_DEVICE:
+    break;
+  }
+
+  *Bus = MyBus;
+  *Device = MyDevice;
+  *Function = MyFunction;
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Process DMAR DRHD table.
+
+  Records the engine's segment and register base address, allocates its PCI
+  device information list, then registers every device named by the DRHD
+  device scope entries (scanning whole buses when INCLUDE_PCI_ALL is set or
+  when a PCI-PCI bridge is listed).
+
+  @param[in] VtdIndex The index of VTd engine.
+  @param[in] DmarDrhd The DRHD table.
+
+  @retval EFI_SUCCESS           The DRHD table is processed.
+  @retval EFI_OUT_OF_RESOURCES  The PCI device information list could not be allocated.
+**/
+EFI_STATUS
+ProcessDrhd (
+  IN UINTN VtdIndex,
+  IN EFI_ACPI_DMAR_DRHD_HEADER *DmarDrhd
+  )
+{
+  EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *DmarDevScopeEntry;
+  UINT8 Bus;
+  UINT8 Device;
+  UINT8 Function;
+  UINT8 SecondaryBusNumber;
+  EFI_STATUS Status;
+  VTD_SOURCE_ID SourceId;
+
+  //
+  // Fixed-capacity list: header plus MAX_VTD_PCI_DATA_NUMBER device entries.
+  //
+  mVtdUnitInformation[VtdIndex].PciDeviceInfo = AllocateZeroPool (sizeof (PCI_DEVICE_INFORMATION) + sizeof (PCI_DEVICE_DATA) * MAX_VTD_PCI_DATA_NUMBER);
+  if (mVtdUnitInformation[VtdIndex].PciDeviceInfo == NULL) {
+    ASSERT (FALSE);
+    return EFI_OUT_OF_RESOURCES;
+  }
+
+  mVtdUnitInformation[VtdIndex].Segment = DmarDrhd->SegmentNumber;
+  mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress = (UINTN)DmarDrhd->RegisterBaseAddress;
+  DEBUG ((DEBUG_INFO," VTD (%d) BaseAddress - 0x%016lx\n", VtdIndex, DmarDrhd->RegisterBaseAddress));
+
+  mVtdUnitInformation[VtdIndex].PciDeviceInfo->Segment = DmarDrhd->SegmentNumber;
+  mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceDataMaxNumber = MAX_VTD_PCI_DATA_NUMBER;
+
+  if ((DmarDrhd->Flags & EFI_ACPI_DMAR_DRHD_FLAGS_INCLUDE_PCI_ALL) != 0) {
+    //
+    // This engine covers all remaining devices on the segment:
+    // register every device found on every bus.
+    //
+    mVtdUnitInformation[VtdIndex].PciDeviceInfo->IncludeAllFlag = TRUE;
+    DEBUG ((DEBUG_INFO," ProcessDrhd: with INCLUDE ALL\n"));
+
+    Status = ScanAllPciBus((VOID *)VtdIndex, DmarDrhd->SegmentNumber, ScanBusCallbackRegisterPciDevice);
+    if (EFI_ERROR (Status)) {
+      return Status;
+    }
+  } else {
+    mVtdUnitInformation[VtdIndex].PciDeviceInfo->IncludeAllFlag = FALSE;
+    DEBUG ((DEBUG_INFO," ProcessDrhd: without INCLUDE ALL\n"));
+  }
+
+  //
+  // Walk the variable-length device scope entries following the DRHD header.
+  //
+  DmarDevScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)((UINTN)(DmarDrhd + 1));
+  while ((UINTN)DmarDevScopeEntry < (UINTN)DmarDrhd + DmarDrhd->Header.Length) {
+
+    Status = GetPciBusDeviceFunction (DmarDrhd->SegmentNumber, DmarDevScopeEntry, &Bus, &Device, &Function);
+    if (EFI_ERROR (Status)) {
+      return Status;
+    }
+
+    DEBUG ((DEBUG_INFO," ProcessDrhd: "));
+    switch (DmarDevScopeEntry->Type) {
+    case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_ENDPOINT:
+      DEBUG ((DEBUG_INFO,"PCI Endpoint"));
+      break;
+    case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_BRIDGE:
+      DEBUG ((DEBUG_INFO,"PCI-PCI bridge"));
+      break;
+    case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_IOAPIC:
+      DEBUG ((DEBUG_INFO,"IOAPIC"));
+      break;
+    case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_MSI_CAPABLE_HPET:
+      DEBUG ((DEBUG_INFO,"MSI Capable HPET"));
+      break;
+    case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_ACPI_NAMESPACE_DEVICE:
+      DEBUG ((DEBUG_INFO,"ACPI Namespace Device"));
+      break;
+    }
+    DEBUG ((DEBUG_INFO," S%04x B%02x D%02x F%02x\n", DmarDrhd->SegmentNumber, Bus, Device, Function));
+
+    SourceId.Bits.Bus = Bus;
+    SourceId.Bits.Device = Device;
+    SourceId.Bits.Function = Function;
+
+    Status = RegisterPciDevice (VtdIndex, DmarDrhd->SegmentNumber, SourceId, DmarDevScopeEntry->Type, TRUE);
+    if (EFI_ERROR (Status)) {
+      //
+      // There might be duplication for special device other than standard PCI device.
+      // Only standard PCI endpoint/bridge duplication is treated as fatal.
+      //
+      switch (DmarDevScopeEntry->Type) {
+      case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_ENDPOINT:
+      case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_BRIDGE:
+        return Status;
+      }
+    }
+
+    switch (DmarDevScopeEntry->Type) {
+    case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_BRIDGE:
+      //
+      // A listed bridge implies everything behind it: scan and register the
+      // devices on its secondary bus as well.
+      //
+      SecondaryBusNumber = PciSegmentRead8 (PCI_SEGMENT_LIB_ADDRESS(DmarDrhd->SegmentNumber, Bus, Device, Function, PCI_BRIDGE_SECONDARY_BUS_REGISTER_OFFSET));
+      Status = ScanPciBus ((VOID *)VtdIndex, DmarDrhd->SegmentNumber, SecondaryBusNumber, ScanBusCallbackRegisterPciDevice);
+      if (EFI_ERROR (Status)) {
+        return Status;
+      }
+      break;
+    default:
+      break;
+    }
+
+    DmarDevScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)((UINTN)DmarDevScopeEntry + DmarDevScopeEntry->Length);
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Process DMAR RMRR table.
+
+  Grants read/write DMA access to the reserved memory region for every PCI
+  endpoint listed in the RMRR device scope.
+
+  @param[in] DmarRmrr The RMRR table.
+
+  @retval EFI_SUCCESS       The RMRR table is processed.
+  @retval EFI_DEVICE_ERROR  A device scope entry is not a PCI endpoint.
+**/
+EFI_STATUS
+ProcessRmrr (
+  IN EFI_ACPI_DMAR_RMRR_HEADER *DmarRmrr
+  )
+{
+  EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *DmarDevScopeEntry;
+  UINT8 Bus;
+  UINT8 Device;
+  UINT8 Function;
+  EFI_STATUS Status;
+  VTD_SOURCE_ID SourceId;
+
+  DEBUG ((DEBUG_INFO," RMRR (Base 0x%016lx, Limit 0x%016lx)\n", DmarRmrr->ReservedMemoryRegionBaseAddress, DmarRmrr->ReservedMemoryRegionLimitAddress));
+
+  //
+  // Walk the device scope entries following the RMRR header.
+  //
+  DmarDevScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)((UINTN)(DmarRmrr + 1));
+  while ((UINTN)DmarDevScopeEntry < (UINTN)DmarRmrr + DmarRmrr->Header.Length) {
+    //
+    // Only PCI endpoints are supported in an RMRR device scope here.
+    //
+    if (DmarDevScopeEntry->Type != EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_ENDPOINT) {
+      DEBUG ((DEBUG_INFO,"RMRR DevScopeEntryType is not endpoint, type[0x%x] \n", DmarDevScopeEntry->Type));
+      return EFI_DEVICE_ERROR;
+    }
+
+    Status = GetPciBusDeviceFunction (DmarRmrr->SegmentNumber, DmarDevScopeEntry, &Bus, &Device, &Function);
+    if (EFI_ERROR (Status)) {
+      return Status;
+    }
+
+    DEBUG ((DEBUG_INFO,"RMRR S%04x B%02x D%02x F%02x\n", DmarRmrr->SegmentNumber, Bus, Device, Function));
+
+    SourceId.Bits.Bus = Bus;
+    SourceId.Bits.Device = Device;
+    SourceId.Bits.Function = Function;
+    //
+    // The region limit is inclusive, hence the +1 in the length computation.
+    //
+    Status = SetAccessAttribute (
+               DmarRmrr->SegmentNumber,
+               SourceId,
+               DmarRmrr->ReservedMemoryRegionBaseAddress,
+               DmarRmrr->ReservedMemoryRegionLimitAddress + 1 - DmarRmrr->ReservedMemoryRegionBaseAddress,
+               EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE
+               );
+    if (EFI_ERROR (Status)) {
+      return Status;
+    }
+
+    DmarDevScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)((UINTN)DmarDevScopeEntry + DmarDevScopeEntry->Length);
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Count the DRHD structures (VTd engines) present in the cached DMAR table.
+
+  @return Number of DRHD entries in the DMAR ACPI table.
+**/
+UINTN
+GetVtdEngineNumber (
+  VOID
+  )
+{
+  EFI_ACPI_DMAR_STRUCTURE_HEADER *Entry;
+  UINTN                          TableEnd;
+  UINTN                          DrhdCount;
+
+  DrhdCount = 0;
+  TableEnd  = (UINTN) mAcpiDmarTable + mAcpiDmarTable->Header.Length;
+
+  for (Entry = (EFI_ACPI_DMAR_STRUCTURE_HEADER *) ((UINTN) (mAcpiDmarTable + 1));
+       (UINTN) Entry < TableEnd;
+       Entry = (EFI_ACPI_DMAR_STRUCTURE_HEADER *) ((UINTN) Entry + Entry->Length)) {
+    if (Entry->Type == EFI_ACPI_DMAR_TYPE_DRHD) {
+      DrhdCount++;
+    }
+  }
+
+  return DrhdCount;
+}
+
+/**
+  Parse DMAR DRHD table.
+
+  Counts the DRHD structures, allocates the per-engine bookkeeping array
+  (mVtdUnitInformation), processes every DRHD entry in table order, and
+  finally dumps and logs the PCI devices registered for each engine.
+
+  @return EFI_SUCCESS           The DMAR DRHD table is parsed.
+  @return EFI_DEVICE_ERROR      No DRHD structure was found.
+  @return EFI_OUT_OF_RESOURCES  The bookkeeping array could not be allocated.
+**/
+EFI_STATUS
+ParseDmarAcpiTableDrhd (
+  VOID
+  )
+{
+  EFI_ACPI_DMAR_STRUCTURE_HEADER *DmarHeader;
+  EFI_STATUS Status;
+  UINTN VtdIndex;
+
+  mVtdUnitNumber = GetVtdEngineNumber ();
+  DEBUG ((DEBUG_INFO," VtdUnitNumber - %d\n", mVtdUnitNumber));
+  ASSERT (mVtdUnitNumber > 0);
+  if (mVtdUnitNumber == 0) {
+    return EFI_DEVICE_ERROR;
+  }
+
+  mVtdUnitInformation = AllocateZeroPool (sizeof(*mVtdUnitInformation) * mVtdUnitNumber);
+  ASSERT (mVtdUnitInformation != NULL);
+  if (mVtdUnitInformation == NULL) {
+    return EFI_OUT_OF_RESOURCES;
+  }
+
+  //
+  // Second pass: process each DRHD structure; VtdIndex tracks the engine
+  // slot in mVtdUnitInformation.
+  //
+  VtdIndex = 0;
+  DmarHeader = (EFI_ACPI_DMAR_STRUCTURE_HEADER *)((UINTN)(mAcpiDmarTable + 1));
+  while ((UINTN)DmarHeader < (UINTN)mAcpiDmarTable + mAcpiDmarTable->Header.Length) {
+    switch (DmarHeader->Type) {
+    case EFI_ACPI_DMAR_TYPE_DRHD:
+      ASSERT (VtdIndex < mVtdUnitNumber);
+      Status = ProcessDrhd (VtdIndex, (EFI_ACPI_DMAR_DRHD_HEADER *)DmarHeader);
+      if (EFI_ERROR (Status)) {
+        return Status;
+      }
+      VtdIndex++;
+
+      break;
+
+    default:
+      break;
+    }
+    DmarHeader = (EFI_ACPI_DMAR_STRUCTURE_HEADER *)((UINTN)DmarHeader + DmarHeader->Length);
+  }
+  ASSERT (VtdIndex == mVtdUnitNumber);
+
+  //
+  // Dump and log the registered PCI device list of every engine.
+  //
+  for (VtdIndex = 0; VtdIndex < mVtdUnitNumber; VtdIndex++) {
+    VtdLibDumpPciDeviceInfo (NULL, NULL, mVtdUnitInformation[VtdIndex].PciDeviceInfo);
+
+    VTdLogAddDataEvent (VTDLOG_DXE_PCI_DEVICE,
+                        mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress,
+                        mVtdUnitInformation[VtdIndex].PciDeviceInfo,
+                        sizeof (PCI_DEVICE_INFORMATION) + sizeof (PCI_DEVICE_DATA) * mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceDataNumber);
+  }
+  return EFI_SUCCESS;
+}
+
+/**
+  Parse the DMAR ACPI table and process every RMRR structure.
+
+  @return EFI_SUCCESS The DMAR RMRR structures are parsed.
+**/
+EFI_STATUS
+ParseDmarAcpiTableRmrr (
+  VOID
+  )
+{
+  EFI_ACPI_DMAR_STRUCTURE_HEADER *Entry;
+  EFI_STATUS                     Status;
+  UINTN                          TableEnd;
+
+  TableEnd = (UINTN) mAcpiDmarTable + mAcpiDmarTable->Header.Length;
+
+  for (Entry = (EFI_ACPI_DMAR_STRUCTURE_HEADER *) ((UINTN) (mAcpiDmarTable + 1));
+       (UINTN) Entry < TableEnd;
+       Entry = (EFI_ACPI_DMAR_STRUCTURE_HEADER *) ((UINTN) Entry + Entry->Length)) {
+    if (Entry->Type == EFI_ACPI_DMAR_TYPE_RMRR) {
+      Status = ProcessRmrr ((EFI_ACPI_DMAR_RMRR_HEADER *) Entry);
+      if (EFI_ERROR (Status)) {
+        return Status;
+      }
+    }
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Locate and cache the DMAR ACPI table.
+
+  @retval EFI_SUCCESS         The DMAR ACPI table is found and cached.
+  @retval EFI_ALREADY_STARTED The DMAR ACPI table was cached previously.
+  @retval EFI_NOT_FOUND       No DMAR ACPI table is installed.
+**/
+EFI_STATUS
+GetDmarAcpiTable (
+  VOID
+  )
+{
+  EFI_ACPI_DMAR_HEADER  *DmarTable;
+
+  if (mAcpiDmarTable != NULL) {
+    return EFI_ALREADY_STARTED;
+  }
+
+  DmarTable = (EFI_ACPI_DMAR_HEADER *) EfiLocateFirstAcpiTable (
+                                         EFI_ACPI_4_0_DMA_REMAPPING_TABLE_SIGNATURE
+                                         );
+  if (DmarTable == NULL) {
+    return EFI_NOT_FOUND;
+  }
+
+  mAcpiDmarTable = DmarTable;
+  DEBUG ((DEBUG_INFO,"DMAR Table - 0x%08x\n", mAcpiDmarTable));
+  VtdDumpDmarTable ();
+
+  return EFI_SUCCESS;
+}
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.c
new file mode 100644
index 000000000..dc7a7146d
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.c
@@ -0,0 +1,782 @@
+/** @file
+ Intel VTd driver.
+
+ Copyright (c) 2017 - 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "DmaProtection.h"
+
+/**
+ Provides the controller-specific addresses required to access system memory from a
+ DMA bus master.
+
+ @param This The protocol instance pointer.
+ @param Operation Indicates if the bus master is going to read or write to system memory.
+ @param HostAddress The system memory address to map to the PCI controller.
+ @param NumberOfBytes On input the number of bytes to map. On output the number of bytes
+ that were mapped.
+ @param DeviceAddress The resulting map address for the bus master PCI controller to use to
+ access the hosts HostAddress.
+ @param Mapping A resulting value to pass to Unmap().
+
+ @retval EFI_SUCCESS The range was mapped for the returned NumberOfBytes.
+ @retval EFI_UNSUPPORTED The HostAddress cannot be mapped as a common buffer.
+ @retval EFI_INVALID_PARAMETER One or more parameters are invalid.
+ @retval EFI_OUT_OF_RESOURCES The request could not be completed due to a lack of resources.
+ @retval EFI_DEVICE_ERROR The system hardware could not map the requested address.
+
+**/
+EFI_STATUS
+EFIAPI
+IoMmuMap (
+ IN EDKII_IOMMU_PROTOCOL *This,
+ IN EDKII_IOMMU_OPERATION Operation,
+ IN VOID *HostAddress,
+ IN OUT UINTN *NumberOfBytes,
+ OUT EFI_PHYSICAL_ADDRESS *DeviceAddress,
+ OUT VOID **Mapping
+ );
+
+/**
+ Completes the Map() operation and releases any corresponding resources.
+
+ @param This The protocol instance pointer.
+ @param Mapping The mapping value returned from Map().
+
+ @retval EFI_SUCCESS The range was unmapped.
+ @retval EFI_INVALID_PARAMETER Mapping is not a value that was returned by Map().
+ @retval EFI_DEVICE_ERROR The data was not committed to the target system memory.
+**/
+EFI_STATUS
+EFIAPI
+IoMmuUnmap (
+ IN EDKII_IOMMU_PROTOCOL *This,
+ IN VOID *Mapping
+ );
+
+/**
+ Allocates pages that are suitable for an OperationBusMasterCommonBuffer or
+ OperationBusMasterCommonBuffer64 mapping.
+
+ @param This The protocol instance pointer.
+ @param Type This parameter is not used and must be ignored.
+ @param MemoryType The type of memory to allocate, EfiBootServicesData or
+ EfiRuntimeServicesData.
+ @param Pages The number of pages to allocate.
+ @param HostAddress A pointer to store the base system memory address of the
+ allocated range.
+ @param Attributes The requested bit mask of attributes for the allocated range.
+
+ @retval EFI_SUCCESS The requested memory pages were allocated.
+ @retval EFI_UNSUPPORTED Attributes is unsupported. The only legal attribute bits are
+ MEMORY_WRITE_COMBINE, MEMORY_CACHED and DUAL_ADDRESS_CYCLE.
+ @retval EFI_INVALID_PARAMETER One or more parameters are invalid.
+ @retval EFI_OUT_OF_RESOURCES The memory pages could not be allocated.
+
+**/
+EFI_STATUS
+EFIAPI
+IoMmuAllocateBuffer (
+ IN EDKII_IOMMU_PROTOCOL *This,
+ IN EFI_ALLOCATE_TYPE Type,
+ IN EFI_MEMORY_TYPE MemoryType,
+ IN UINTN Pages,
+ IN OUT VOID **HostAddress,
+ IN UINT64 Attributes
+ );
+
+/**
+ Frees memory that was allocated with AllocateBuffer().
+
+ @param This The protocol instance pointer.
+ @param Pages The number of pages to free.
+ @param HostAddress The base system memory address of the allocated range.
+
+ @retval EFI_SUCCESS The requested memory pages were freed.
+ @retval EFI_INVALID_PARAMETER The memory range specified by HostAddress and Pages
+ was not allocated with AllocateBuffer().
+
+**/
+EFI_STATUS
+EFIAPI
+IoMmuFreeBuffer (
+ IN EDKII_IOMMU_PROTOCOL *This,
+ IN UINTN Pages,
+ IN VOID *HostAddress
+ );
+
+/**
+ This function fills DeviceHandle/IoMmuAccess to the MAP_HANDLE_INFO,
+ based upon the DeviceAddress.
+
+ @param[in] DeviceHandle The device who initiates the DMA access request.
+ @param[in] DeviceAddress The base of device memory address to be used as the DMA memory.
+ @param[in] Length The length of device memory address to be used as the DMA memory.
+ @param[in] IoMmuAccess The IOMMU access.
+
+**/
+VOID
+SyncDeviceHandleToMapInfo (
+ IN EFI_HANDLE DeviceHandle,
+ IN EFI_PHYSICAL_ADDRESS DeviceAddress,
+ IN UINT64 Length,
+ IN UINT64 IoMmuAccess
+ );
+
+/**
+  Convert the DeviceHandle to SourceId and Segment.
+
+  The platform VTd policy protocol is consulted first when present;
+  otherwise the PCI location is queried from the handle's PciIo instance.
+
+  @param[in]  DeviceHandle  The device who initiates the DMA access request.
+  @param[out] Segment       The Segment used to identify a VTd engine.
+  @param[out] SourceId      The SourceId used to identify a VTd engine and table entry.
+
+  @retval EFI_SUCCESS            The Segment and SourceId are returned.
+  @retval EFI_INVALID_PARAMETER  DeviceHandle is an invalid handle.
+  @retval EFI_UNSUPPORTED        DeviceHandle is unknown by the IOMMU.
+**/
+EFI_STATUS
+DeviceHandleToSourceId (
+  IN EFI_HANDLE DeviceHandle,
+  OUT UINT16 *Segment,
+  OUT VTD_SOURCE_ID *SourceId
+  )
+{
+  EFI_PCI_IO_PROTOCOL            *PciIo;
+  UINTN                          PciSegment;
+  UINTN                          PciBus;
+  UINTN                          PciDevice;
+  UINTN                          PciFunction;
+  EFI_STATUS                     Status;
+  EDKII_PLATFORM_VTD_DEVICE_INFO DeviceInfo;
+
+  //
+  // Prefer the platform policy mapping when it recognizes this handle.
+  //
+  if (mPlatformVTdPolicy != NULL) {
+    Status = mPlatformVTdPolicy->GetDeviceId (mPlatformVTdPolicy, DeviceHandle, &DeviceInfo);
+    if (!EFI_ERROR (Status)) {
+      *Segment  = DeviceInfo.Segment;
+      *SourceId = DeviceInfo.SourceId;
+      return EFI_SUCCESS;
+    }
+  }
+
+  //
+  // Fall back to the standard PCI location of the device.
+  //
+  Status = gBS->HandleProtocol (DeviceHandle, &gEfiPciIoProtocolGuid, (VOID **) &PciIo);
+  if (EFI_ERROR (Status)) {
+    return EFI_UNSUPPORTED;
+  }
+
+  Status = PciIo->GetLocation (PciIo, &PciSegment, &PciBus, &PciDevice, &PciFunction);
+  if (EFI_ERROR (Status)) {
+    return EFI_UNSUPPORTED;
+  }
+
+  *Segment                = (UINT16) PciSegment;
+  SourceId->Bits.Bus      = (UINT8) PciBus;
+  SourceId->Bits.Device   = (UINT8) PciDevice;
+  SourceId->Bits.Function = (UINT8) PciFunction;
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Set IOMMU attribute for a system memory range on behalf of a device.
+
+  If the IOMMU protocol exists, the system memory cannot be used
+  for DMA by default. A device driver uses SetAttribute() to request
+  DMA access (read and/or write) for a memory range; setting 0 access
+  re-protects the memory after use.
+
+  Before the DMAR table is available the request is recorded for later
+  replay (unless policy BIT2 forbids it); afterwards it is applied to the
+  VTd translation tables immediately.
+
+  @param[in] This           The protocol instance pointer.
+  @param[in] DeviceHandle   The device who initiates the DMA access request.
+  @param[in] DeviceAddress  The base of device memory address to be used as the DMA memory.
+  @param[in] Length         The length of device memory address to be used as the DMA memory.
+  @param[in] IoMmuAccess    The IOMMU access.
+
+  @retval EFI_SUCCESS            The IoMmuAccess is set for the memory range specified by DeviceAddress and Length.
+  @retval EFI_INVALID_PARAMETER  DeviceHandle is an invalid handle.
+  @retval EFI_INVALID_PARAMETER  DeviceAddress is not IoMmu Page size aligned.
+  @retval EFI_INVALID_PARAMETER  Length is not IoMmu Page size aligned.
+  @retval EFI_INVALID_PARAMETER  Length is 0.
+  @retval EFI_INVALID_PARAMETER  IoMmuAccess specified an illegal combination of access.
+  @retval EFI_UNSUPPORTED        DeviceHandle is unknown by the IOMMU.
+  @retval EFI_UNSUPPORTED        The bit mask of IoMmuAccess is not supported by the IOMMU.
+  @retval EFI_UNSUPPORTED        The IOMMU does not support the memory range specified by DeviceAddress and Length.
+  @retval EFI_OUT_OF_RESOURCES   There are not enough resources available to modify the IOMMU access.
+  @retval EFI_DEVICE_ERROR       The IOMMU device reported an error while attempting the operation.
+**/
+EFI_STATUS
+VTdSetAttribute (
+  IN EDKII_IOMMU_PROTOCOL *This,
+  IN EFI_HANDLE DeviceHandle,
+  IN EFI_PHYSICAL_ADDRESS DeviceAddress,
+  IN UINT64 Length,
+  IN UINT64 IoMmuAccess
+  )
+{
+  EFI_STATUS Status;
+  UINT16 Segment;
+  VTD_SOURCE_ID SourceId;
+  CHAR8 PerfToken[sizeof("VTD(S0000.B00.D00.F00)")];
+  UINT32 Identifier;
+  VTD_PROTOCOL_SET_ATTRIBUTE LogSetAttribute;
+
+  //
+  // Debug aid: dump VTd state if an error condition is present.
+  //
+  DumpVtdIfError ();
+
+  Status = DeviceHandleToSourceId (DeviceHandle, &Segment, &SourceId);
+  if (EFI_ERROR(Status)) {
+    return Status;
+  }
+
+  DEBUG ((DEBUG_VERBOSE, "IoMmuSetAttribute: "));
+  DEBUG ((DEBUG_VERBOSE, "PCI(S%x.B%x.D%x.F%x) ", Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+  DEBUG ((DEBUG_VERBOSE, "(0x%lx~0x%lx) - %lx\n", DeviceAddress, Length, IoMmuAccess));
+
+  if (mAcpiDmarTable == NULL) {
+    //
+    // Record the entry to driver global variable.
+    // As such once VTd is activated, the setting can be adopted.
+    //
+    if ((PcdGet8 (PcdVTdPolicyPropertyMask) & BIT2) != 0) {
+      //
+      // Force no IOMMU access attribute request recording before DMAR table is installed.
+      //
+      ASSERT_EFI_ERROR (EFI_NOT_READY);
+      return EFI_NOT_READY;
+    }
+    Status = RequestAccessAttribute (Segment, SourceId, DeviceAddress, Length, IoMmuAccess);
+  } else {
+    //
+    // DMAR table present: apply the attribute now, bracketed by performance
+    // measurements keyed on the source id.
+    //
+    PERF_CODE (
+      AsciiSPrint (PerfToken, sizeof(PerfToken), "S%04xB%02xD%02xF%01x", Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function);
+      Identifier = (Segment << 16) | SourceId.Uint16;
+      PERF_START_EX (gImageHandle, PerfToken, "IntelVTD", 0, Identifier);
+    );
+
+    Status = SetAccessAttribute (Segment, SourceId, DeviceAddress, Length, IoMmuAccess);
+
+    PERF_CODE (
+      Identifier = (Segment << 16) | SourceId.Uint16;
+      PERF_END_EX (gImageHandle, PerfToken, "IntelVTD", 0, Identifier);
+    );
+  }
+
+  if (!EFI_ERROR(Status)) {
+    //
+    // Record DeviceHandle/IoMmuAccess into the MAP_HANDLE_INFO that matches
+    // this DeviceAddress range.
+    //
+    SyncDeviceHandleToMapInfo (
+      DeviceHandle,
+      DeviceAddress,
+      Length,
+      IoMmuAccess
+      );
+  }
+
+  //
+  // Log the request and its outcome regardless of success or failure.
+  //
+  LogSetAttribute.SourceId.Uint16 = SourceId.Uint16;
+  LogSetAttribute.DeviceAddress = DeviceAddress;
+  LogSetAttribute.Length = Length;
+  LogSetAttribute.IoMmuAccess = IoMmuAccess;
+  LogSetAttribute.Status = Status;
+  VTdLogAddDataEvent (VTDLOG_DXE_IOMMU_SET_ATTRIBUTE, 0, &LogSetAttribute, sizeof (VTD_PROTOCOL_SET_ATTRIBUTE));
+
+  return Status;
+}
+
+/**
+  Set IOMMU attribute for a system memory described by a Map() handle.
+
+  Resolves the Mapping value returned from Map() to its device address and
+  page count, then applies IoMmuAccess through VTdSetAttribute(). The TPL is
+  raised to VTD_TPL_LEVEL for the duration of the update.
+
+  @param[in] This          The protocol instance pointer.
+  @param[in] DeviceHandle  The device who initiates the DMA access request.
+  @param[in] Mapping       The mapping value returned from Map().
+  @param[in] IoMmuAccess   The IOMMU access.
+
+  @retval EFI_SUCCESS            The IoMmuAccess is set for the memory range specified by DeviceAddress and Length.
+  @retval EFI_INVALID_PARAMETER  DeviceHandle is an invalid handle.
+  @retval EFI_INVALID_PARAMETER  Mapping is not a value that was returned by Map().
+  @retval EFI_INVALID_PARAMETER  IoMmuAccess specified an illegal combination of access.
+  @retval EFI_UNSUPPORTED        DeviceHandle is unknown by the IOMMU.
+  @retval EFI_UNSUPPORTED        The bit mask of IoMmuAccess is not supported by the IOMMU.
+  @retval EFI_UNSUPPORTED        The IOMMU does not support the memory range specified by Mapping.
+  @retval EFI_OUT_OF_RESOURCES   There are not enough resources available to modify the IOMMU access.
+  @retval EFI_DEVICE_ERROR       The IOMMU device reported an error while attempting the operation.
+**/
+EFI_STATUS
+EFIAPI
+IoMmuSetAttribute (
+  IN EDKII_IOMMU_PROTOCOL *This,
+  IN EFI_HANDLE DeviceHandle,
+  IN VOID *Mapping,
+  IN UINT64 IoMmuAccess
+  )
+{
+  EFI_STATUS Status;
+  EFI_PHYSICAL_ADDRESS DeviceAddress;
+  UINTN NumberOfPages;
+  EFI_TPL OriginalTpl;
+
+  OriginalTpl = gBS->RaiseTPL (VTD_TPL_LEVEL);
+
+  //
+  // Translate the opaque mapping back to (address, pages) before applying.
+  //
+  Status = GetDeviceInfoFromMapping (Mapping, &DeviceAddress, &NumberOfPages);
+  if (!EFI_ERROR(Status)) {
+    Status = VTdSetAttribute (
+               This,
+               DeviceHandle,
+               DeviceAddress,
+               EFI_PAGES_TO_SIZE(NumberOfPages),
+               IoMmuAccess
+               );
+  }
+
+  gBS->RestoreTPL (OriginalTpl);
+
+  return Status;
+}
+
+//
+// IOMMU protocol instance produced by this driver.
+//
+EDKII_IOMMU_PROTOCOL mIntelVTd = {
+  EDKII_IOMMU_PROTOCOL_REVISION,
+  IoMmuSetAttribute,
+  IoMmuMap,
+  IoMmuUnmap,
+  IoMmuAllocateBuffer,
+  IoMmuFreeBuffer,
+};
+
+//
+// Single pool buffer holding, in order: DXE events, PEI post-memory events,
+// and PEI pre-memory info records.
+//
+UINT8 *mVtdLogBuffer = NULL;
+
+// Next free byte in the DXE region, and bytes consumed so far.
+UINT8 *mVtdLogDxeFreeBuffer = NULL;
+UINT32 mVtdLogDxeBufferUsed = 0;
+
+// Bytes of PEI post-memory log copied out of the HOB.
+UINT32 mVtdLogPeiPostMemBufferUsed = 0;
+
+// Sticky error flags, reported as synthetic events by VTdLogGetEvents().
+UINT8 mVtdLogPeiError = 0;
+UINT16 mVtdLogDxeError = 0;
+
+/**
+  Carve MemorySize bytes out of the DXE region of the VTd log buffer.
+
+  @param[in] MemorySize Required memory buffer size.
+
+  @retval Buffer address, or NULL when logging is not active or the DXE
+          region is exhausted (which also latches VTD_LOG_ERROR_BUFFER_FULL).
+**/
+UINT8 *
+EFIAPI
+VTdLogAllocMemory (
+  IN CONST UINT32 MemorySize
+  )
+{
+  UINT8  *Buffer;
+
+  if (mVtdLogDxeFreeBuffer == NULL) {
+    return NULL;
+  }
+
+  if ((mVtdLogDxeBufferUsed + MemorySize) > PcdGet32 (PcdVTdDxeLogBufferSize)) {
+    mVtdLogDxeError |= VTD_LOG_ERROR_BUFFER_FULL;
+    return NULL;
+  }
+
+  Buffer                = mVtdLogDxeFreeBuffer;
+  mVtdLogDxeFreeBuffer += MemorySize;
+  mVtdLogDxeBufferUsed += MemorySize;
+
+  return Buffer;
+}
+
+/**
+  Add a new VTd log event carrying two 64-bit parameters.
+
+  @param[in] EventType Event type
+  @param[in] Data1     First parameter
+  @param[in] Data2     Second parameter
+**/
+VOID
+EFIAPI
+VTdLogAddEvent (
+  IN CONST VTDLOG_EVENT_TYPE EventType,
+  IN CONST UINT64 Data1,
+  IN CONST UINT64 Data2
+  )
+{
+  VTDLOG_EVENT_2PARAM *Event;
+  UINT8               LogLevel;
+
+  //
+  // Level 0 disables logging; level 1 records only basic (non-advanced) events.
+  //
+  LogLevel = PcdGet8 (PcdVTdLogLevel);
+  if ((LogLevel == 0) || ((LogLevel == 1) && (EventType >= VTDLOG_DXE_ADVANCED))) {
+    return;
+  }
+
+  Event = (VTDLOG_EVENT_2PARAM *) VTdLogAllocMemory (sizeof (VTDLOG_EVENT_2PARAM));
+  if (Event == NULL) {
+    return;
+  }
+
+  Event->Header.DataSize  = sizeof (VTDLOG_EVENT_2PARAM);
+  Event->Header.LogType   = (UINT64) 1 << EventType;
+  Event->Header.Timestamp = AsmReadTsc ();
+  Event->Data1            = Data1;
+  Event->Data2            = Data2;
+}
+
+/**
+  Add a new VTd log event with an attached variable-size data payload.
+
+  @param[in] EventType Event type
+  @param[in] Param     parameter
+  @param[in] Data      Data
+  @param[in] DataSize  Data size
+**/
+VOID
+EFIAPI
+VTdLogAddDataEvent (
+  IN CONST VTDLOG_EVENT_TYPE EventType,
+  IN CONST UINT64 Param,
+  IN CONST VOID *Data,
+  IN CONST UINT32 DataSize
+  )
+{
+  VTDLOG_EVENT_CONTEXT *Event;
+  UINT32               EventSize;
+  UINT8                LogLevel;
+
+  //
+  // Level 0 disables logging; level 1 records only basic (non-advanced) events.
+  //
+  LogLevel = PcdGet8 (PcdVTdLogLevel);
+  if ((LogLevel == 0) || ((LogLevel == 1) && (EventType >= VTDLOG_DXE_ADVANCED))) {
+    return;
+  }
+
+  //
+  // VTDLOG_EVENT_CONTEXT already reserves the first payload byte, hence -1.
+  //
+  EventSize = sizeof (VTDLOG_EVENT_CONTEXT) + DataSize - 1;
+
+  Event = (VTDLOG_EVENT_CONTEXT *) VTdLogAllocMemory (EventSize);
+  if (Event == NULL) {
+    return;
+  }
+
+  Event->Header.DataSize  = EventSize;
+  Event->Header.LogType   = (UINT64) 1 << EventType;
+  Event->Header.Timestamp = AsmReadTsc ();
+  Event->Param            = Param;
+  CopyMem (Event->Data, Data, DataSize);
+}
+
+/**
+  Replay the VTd log events recorded in the PEI pre-memory information buffer.
+
+  @param[in]     InfoBuffer      PEI pre-memory phase information records.
+  @param[in]     Context         Event context passed through to the callback.
+  @param[in out] CallbackHandle  Callback function for each VTd log event.
+
+  @return Number of events found in the buffer.
+**/
+UINT64
+EFIAPI
+VTdGetEventItemsFromPeiPreMemBuffer (
+  IN VTDLOG_PEI_PRE_MEM_INFO        *InfoBuffer,
+  IN VOID                           *Context,
+  IN OUT EDKII_VTD_LOG_HANDLE_EVENT CallbackHandle
+  )
+{
+  UINTN               Index;
+  UINT64              EventCount;
+  VTDLOG_EVENT_2PARAM Event;
+
+  if (InfoBuffer == NULL) {
+    return 0;
+  }
+
+  EventCount = 0;
+  for (Index = 0; Index < VTD_LOG_PEI_PRE_MEM_BAR_MAX; Index++) {
+    if (InfoBuffer[Index].Mode == VTD_LOG_PEI_PRE_MEM_NOT_USED) {
+      continue;
+    }
+
+    EventCount++;
+    if (CallbackHandle == NULL) {
+      continue;
+    }
+
+    //
+    // Synthesize a 2-parameter event from the record: Data1 carries the BAR
+    // address, Data2 packs Mode (low byte) with Status shifted above it.
+    //
+    Event.Header.DataSize  = sizeof (VTDLOG_EVENT_2PARAM);
+    Event.Header.Timestamp = 0;
+    Event.Header.LogType   = ((UINT64) 1) << VTDLOG_PEI_PRE_MEM_DMA_PROTECT;
+    Event.Data1            = InfoBuffer[Index].BarAddress;
+    Event.Data2            = InfoBuffer[Index].Mode;
+    Event.Data2           |= InfoBuffer[Index].Status<<8;
+    CallbackHandle (Context, &Event.Header);
+  }
+
+  return EventCount;
+}
+
+/**
+  Replay the VTd log events stored in a PEI post-memory or DXE event buffer.
+
+  The buffer is a packed sequence of variable-size events, each starting with
+  a VTDLOG_EVENT_HEADER whose DataSize covers the whole event record.
+
+  @param[in]     Buffer          Data buffer.
+  @param[in]     BufferUsed      Number of valid bytes in the buffer.
+  @param[in]     Context         Event context passed through to the callback.
+  @param[in out] CallbackHandle  Callback function for each VTd log event.
+
+  @return Number of events found in the buffer.
+**/
+UINT64
+EFIAPI
+VTdGetEventItemsFromBuffer (
+  IN UINT8                          *Buffer,
+  IN UINT32                         BufferUsed,
+  IN VOID                           *Context,
+  IN OUT EDKII_VTD_LOG_HANDLE_EVENT CallbackHandle
+  )
+{
+  UINT64              Count;
+  VTDLOG_EVENT_HEADER *Header;
+
+  Count = 0;
+  if (Buffer != NULL) {
+    //
+    // Stop as soon as the remaining bytes cannot hold a full header.
+    //
+    while (BufferUsed >= sizeof (VTDLOG_EVENT_HEADER)) {
+      Header = (VTDLOG_EVENT_HEADER *) Buffer;
+      //
+      // Stop on a corrupted record: a DataSize smaller than the header
+      // (notably zero) would otherwise make this loop spin forever, and a
+      // DataSize larger than the remaining bytes would walk off the buffer.
+      //
+      if ((Header->DataSize < sizeof (VTDLOG_EVENT_HEADER)) ||
+          (Header->DataSize > BufferUsed)) {
+        break;
+      }
+      if (CallbackHandle != NULL) {
+        CallbackHandle (Context, Header);
+      }
+      Buffer     += Header->DataSize;
+      BufferUsed -= Header->DataSize;
+      Count++;
+    }
+  }
+
+  return Count;
+}
+
+/**
+  Synthesize a state event (timestamp 0) and hand it to the callback.
+
+  @param[in]     EventType       Event type
+  @param[in]     Data1           First parameter
+  @param[in]     Data2           Second parameter
+  @param[in]     Context         Event context
+  @param[in out] CallbackHandle  Callback function for each VTd log event
+**/
+VOID
+EFIAPI
+VTdGenerateStateEvent (
+  IN VTDLOG_EVENT_TYPE              EventType,
+  IN UINT64                         Data1,
+  IN UINT64                         Data2,
+  IN VOID                           *Context,
+  IN OUT EDKII_VTD_LOG_HANDLE_EVENT CallbackHandle
+  )
+{
+  VTDLOG_EVENT_2PARAM Event;
+
+  if (CallbackHandle == NULL) {
+    return;
+  }
+
+  Event.Header.DataSize  = sizeof (VTDLOG_EVENT_2PARAM);
+  Event.Header.LogType   = (UINT64) 1 << EventType;
+  Event.Header.Timestamp = 0;
+  Event.Data1            = Data1;
+  Event.Data2            = Data2;
+
+  CallbackHandle (Context, &Event.Header);
+}
+
+/**
+  Enumerate all collected VTd log events — PEI pre-memory, PEI post-memory,
+  then DXE — invoking CallbackHandle once per event.
+
+  @param[in]     Context         Event context
+  @param[in out] CallbackHandle  Callback function for each VTd log event
+
+  @retval UINT64  Number of events reported.
+**/
+UINT64
+EFIAPI
+VTdLogGetEvents (
+  IN VOID                           *Context,
+  IN OUT EDKII_VTD_LOG_HANDLE_EVENT CallbackHandle
+  )
+{
+  UINT64 Count;
+  UINT8 *Buffer;
+
+  if (mVtdLogBuffer == NULL) {
+    return 0;
+  }
+
+  //
+  // PEI pre-memory phase: fixed-size info records located after the DXE and
+  // PEI post-memory regions of the pool buffer.
+  //
+  Buffer = &mVtdLogBuffer[PcdGet32 (PcdVTdDxeLogBufferSize) + PcdGet32 (PcdVTdPeiPostMemLogBufferSize)];
+  Count = VTdGetEventItemsFromPeiPreMemBuffer ((VTDLOG_PEI_PRE_MEM_INFO *) Buffer, Context, CallbackHandle);
+
+  //
+  // PEI post-memory phase: packed events copied from the PEI HOB.
+  //
+  Buffer = &mVtdLogBuffer[PcdGet32 (PcdVTdDxeLogBufferSize)];
+  Count += VTdGetEventItemsFromBuffer (Buffer, mVtdLogPeiPostMemBufferUsed, Context, CallbackHandle);
+  if (mVtdLogPeiError != 0) {
+    //
+    // Report accumulated PEI logging errors as one synthetic event.
+    //
+    VTdGenerateStateEvent (VTDLOG_PEI_BASIC, mVtdLogPeiError, 0, Context, CallbackHandle);
+    Count++;
+  }
+
+  //
+  // DXE phase: packed events at the start of the pool buffer.
+  //
+  Buffer = &mVtdLogBuffer[0];
+  Count += VTdGetEventItemsFromBuffer (Buffer, mVtdLogDxeBufferUsed, Context, CallbackHandle);
+  if (mVtdLogDxeError != 0) {
+    //
+    // Report accumulated DXE logging errors as one synthetic event.
+    //
+    VTdGenerateStateEvent (VTDLOG_DXE_BASIC, mVtdLogDxeError, 0, Context, CallbackHandle);
+    Count++;
+  }
+
+  return Count;
+}
+
+//
+// VTd log protocol instance; exposes the recorded events via VTdLogGetEvents.
+//
+EDKII_VTD_LOG_PROTOCOL mIntelVTdLog = {
+  EDKII_VTD_LOG_PROTOCOL_REVISION,
+  VTdLogGetEvents
+};
+
+/**
+  Initializes the VTd Log.
+
+  Allocates one pool buffer split into three consecutive regions:
+    [0, DxeSize)                      - DXE phase events
+    [DxeSize, DxeSize + PeiSize)      - PEI post-memory events from the HOB
+    [DxeSize + PeiSize, total)        - PEI pre-memory info records from the HOB
+  then publishes the VTd log protocol.
+**/
+VOID
+EFIAPI
+VTdLogInitialize(
+  VOID
+  )
+{
+  UINT32                TotalBufferSize;
+  EFI_STATUS            Status;
+  VOID                  *HobPtr;
+  VTDLOG_PEI_BUFFER_HOB *HobPeiBuffer;
+  EFI_HANDLE            Handle;
+  UINT32                BufferOffset;
+
+  if (PcdGet8 (PcdVTdLogLevel) == 0) {
+    return;
+  }
+
+  if (mVtdLogBuffer != NULL) {
+    //
+    // Already initialized.
+    //
+    return;
+  }
+
+  TotalBufferSize = PcdGet32 (PcdVTdDxeLogBufferSize) + PcdGet32 (PcdVTdPeiPostMemLogBufferSize) + sizeof (VTDLOG_PEI_PRE_MEM_INFO) * VTD_LOG_PEI_PRE_MEM_BAR_MAX;
+
+  Status = gBS->AllocatePool (EfiBootServicesData, TotalBufferSize, (VOID **) &mVtdLogBuffer);
+  if (EFI_ERROR (Status)) {
+    return;
+  }
+
+  //
+  // DXE Buffer
+  //
+  if (PcdGet32 (PcdVTdDxeLogBufferSize) > 0) {
+    mVtdLogDxeFreeBuffer = mVtdLogBuffer;
+    mVtdLogDxeBufferUsed = 0;
+  }
+
+  //
+  // Get PEI pre-memory buffer offset
+  //
+  BufferOffset = PcdGet32 (PcdVTdDxeLogBufferSize) + PcdGet32 (PcdVTdPeiPostMemLogBufferSize);
+
+  HobPtr = GetFirstGuidHob (&gVTdLogBufferHobGuid);
+  if (HobPtr != NULL) {
+    HobPeiBuffer = GET_GUID_HOB_DATA (HobPtr);
+
+    //
+    // Copy PEI pre-memory phase VTd log.
+    //
+    CopyMem (&mVtdLogBuffer[BufferOffset], &HobPeiBuffer->PreMemInfo, sizeof (VTDLOG_PEI_PRE_MEM_INFO) * VTD_LOG_PEI_PRE_MEM_BAR_MAX);
+
+    //
+    // Copy PEI post-memory phase VTd log.
+    //
+    BufferOffset = PcdGet32 (PcdVTdDxeLogBufferSize);
+    if (PcdGet32 (PcdVTdPeiPostMemLogBufferSize) > 0) {
+      if (HobPeiBuffer->PostMemBufferUsed > 0) {
+        //
+        // Clamp to the reserved region size so a corrupted or oversized HOB
+        // value cannot overflow the pool buffer.
+        //
+        mVtdLogPeiPostMemBufferUsed = HobPeiBuffer->PostMemBufferUsed;
+        if (mVtdLogPeiPostMemBufferUsed > PcdGet32 (PcdVTdPeiPostMemLogBufferSize)) {
+          mVtdLogPeiPostMemBufferUsed = PcdGet32 (PcdVTdPeiPostMemLogBufferSize);
+        }
+        CopyMem (&mVtdLogBuffer[BufferOffset], (UINT8 *) (UINTN) HobPeiBuffer->PostMemBuffer, mVtdLogPeiPostMemBufferUsed);
+      }
+    }
+
+    mVtdLogPeiError = HobPeiBuffer->VtdLogPeiError;
+  } else {
+    //
+    // No PEI VTd log HOB found: clear the PEI pre-memory phase region.
+    //
+    ZeroMem (&mVtdLogBuffer[BufferOffset], sizeof (VTDLOG_PEI_PRE_MEM_INFO) * VTD_LOG_PEI_PRE_MEM_BAR_MAX);
+  }
+
+  Handle = NULL;
+  Status = gBS->InstallMultipleProtocolInterfaces (
+                  &Handle,
+                  &gEdkiiVTdLogProtocolGuid,
+                  &mIntelVTdLog,
+                  NULL
+                  );
+  ASSERT_EFI_ERROR (Status);
+}
+
+/**
+  Initialize the VTd driver.
+
+  @param[in] ImageHandle ImageHandle of the loaded driver
+  @param[in] SystemTable Pointer to the System Table
+
+  @retval EFI_SUCCESS           The Protocol is installed.
+  @retval EFI_UNSUPPORTED       The VTd feature is disabled by policy.
+  @retval EFI_OUT_OF_RESOURCES  Not enough resources available to initialize driver.
+  @retval EFI_DEVICE_ERROR      A device error occurred attempting to initialize the driver.
+**/
+EFI_STATUS
+EFIAPI
+IntelVTdInitialize (
+  IN EFI_HANDLE EFI_HANDLE_PARAM_UNUSED_FMT_GUARD_REMOVED ImageHandle,
+  IN EFI_SYSTEM_TABLE *SystemTable
+  )
+{
+  EFI_STATUS Status;
+  EFI_HANDLE Handle;
+
+  //
+  // BIT0 of the policy mask globally enables this driver.
+  //
+  if ((PcdGet8 (PcdVTdPolicyPropertyMask) & BIT0) == 0) {
+    return EFI_UNSUPPORTED;
+  }
+
+  VTdLogInitialize ();
+  InitializeDmaProtection ();
+
+  Handle = NULL;
+  Status = gBS->InstallMultipleProtocolInterfaces (
+                  &Handle,
+                  &gEdkiiIoMmuProtocolGuid,
+                  &mIntelVTd,
+                  NULL
+                  );
+  ASSERT_EFI_ERROR (Status);
+
+  VTdLogAddEvent (VTDLOG_DXE_INSTALL_IOMMU_PROTOCOL, Status, 0);
+
+  return Status;
+}
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.inf b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.inf
new file mode 100644
index 000000000..6f3c9e7df
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.inf
@@ -0,0 +1,92 @@
+## @file
+# Intel VTd DXE Driver.
+#
+# This driver initializes VTd engine based upon DMAR ACPI tables
+# and provides DMA protection to PCI or ACPI devices.
+#
+# Copyright (c) 2017 - 2023, Intel Corporation. All rights reserved.<BR>
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+##
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = IntelVTdCoreDxe
+ MODULE_UNI_FILE = IntelVTdCoreDxe.uni
+ FILE_GUID = 5c83381f-34d3-4672-b8f3-83c3d6f3b00e
+ MODULE_TYPE = DXE_DRIVER
+ VERSION_STRING = 1.0
+ ENTRY_POINT = IntelVTdInitialize
+
+#
+# The following information is for reference only and not required by the build tools.
+#
+# VALID_ARCHITECTURES = IA32 X64 EBC
+#
+#
+
+[Sources]
+ IntelVTdCoreDxe.c
+ BmDma.c
+ DmaProtection.c
+ DmaProtection.h
+ DmarAcpiTable.c
+ PciInfo.c
+ TranslationTable.c
+ TranslationTableEx.c
+ VtdReg.c
+
+[Packages]
+ MdePkg/MdePkg.dec
+ MdeModulePkg/MdeModulePkg.dec
+ IntelSiliconPkg/IntelSiliconPkg.dec
+
+[LibraryClasses]
+ DebugLib
+ UefiDriverEntryPoint
+ UefiBootServicesTableLib
+ BaseLib
+ IoLib
+ HobLib
+ PciSegmentLib
+ BaseMemoryLib
+ MemoryAllocationLib
+ UefiLib
+ CacheMaintenanceLib
+ PerformanceLib
+ PrintLib
+ ReportStatusCodeLib
+ IntelVTdPeiDxeLib
+
+[Guids]
+ gVTdLogBufferHobGuid ## CONSUMES
+ gEfiEventExitBootServicesGuid ## CONSUMES ## Event
+ ## CONSUMES ## SystemTable
+ ## CONSUMES ## Event
+ gEfiAcpi20TableGuid
+ ## CONSUMES ## SystemTable
+ ## CONSUMES ## Event
+ gEfiAcpi10TableGuid
+
+[Protocols]
+ gEdkiiIoMmuProtocolGuid ## PRODUCES
+ gEfiPciIoProtocolGuid ## CONSUMES
+ gEfiPciEnumerationCompleteProtocolGuid ## CONSUMES
+ gEdkiiPlatformVTdPolicyProtocolGuid ## SOMETIMES_CONSUMES
+ gEfiPciRootBridgeIoProtocolGuid ## CONSUMES
+ gEdkiiVTdLogProtocolGuid ## PRODUCES
+
+[Pcd]
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdPolicyPropertyMask ## CONSUMES
+ gIntelSiliconPkgTokenSpaceGuid.PcdErrorCodeVTdError ## CONSUMES
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdSupportAbortDmaMode ## CONSUMES
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdLogLevel ## CONSUMES
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdPeiPostMemLogBufferSize ## CONSUMES
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdDxeLogBufferSize ## CONSUMES
+
+[Depex]
+ gEfiPciRootBridgeIoProtocolGuid
+
+[UserExtensions.TianoCore."ExtraFiles"]
+ IntelVTdCoreDxeExtra.uni
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.uni b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.uni
new file mode 100644
index 000000000..73d2c83c4
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.uni
@@ -0,0 +1,14 @@
+// /** @file
+// IntelVTdDxe Module Localized Abstract and Description Content
+//
+// Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+//
+// **/
+
+
+#string STR_MODULE_ABSTRACT #language en-US "Intel VTd CORE DXE Driver."
+
+#string STR_MODULE_DESCRIPTION #language en-US "This driver initializes VTd engine based upon DMAR ACPI tables and provide DMA protection to PCI or ACPI device."
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxeExtra.uni b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxeExtra.uni
new file mode 100644
index 000000000..7f1aec65e
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxeExtra.uni
@@ -0,0 +1,14 @@
+// /** @file
+// IntelVTdDxe Localized Strings and Content
+//
+// Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+//
+// **/
+
+#string STR_PROPERTIES_MODULE_NAME
+#language en-US
+"Intel VTd CORE DXE Driver"
+
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/PciInfo.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/PciInfo.c
new file mode 100644
index 000000000..394ef734c
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/PciInfo.c
@@ -0,0 +1,419 @@
+/** @file
+
+ Copyright (c) 2017 - 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "DmaProtection.h"
+
+/**
+  Return the index of PCI data.
+
+  @param[in] VtdIndex The index used to identify a VTd engine.
+  @param[in] Segment  The Segment used to identify a VTd engine.
+  @param[in] SourceId The SourceId used to identify a VTd engine and table entry.
+
+  @return The index of the PCI data.
+  @retval (UINTN)-1 The PCI data is not found.
+**/
+UINTN
+GetPciDataIndex (
+  IN UINTN          VtdIndex,
+  IN UINT16         Segment,
+  IN VTD_SOURCE_ID  SourceId
+  )
+{
+  UINTN                   Index;
+  VTD_SOURCE_ID           *CandidateId;
+  PCI_DEVICE_INFORMATION  *PciDeviceInfo;
+
+  //
+  // A device can only belong to an engine on the same PCI segment.
+  //
+  if (Segment != mVtdUnitInformation[VtdIndex].Segment) {
+    return (UINTN)-1;
+  }
+
+  PciDeviceInfo = mVtdUnitInformation[VtdIndex].PciDeviceInfo;
+  for (Index = 0; Index < PciDeviceInfo->PciDeviceDataNumber; Index++) {
+    CandidateId = &PciDeviceInfo->PciDeviceData[Index].PciSourceId;
+    if ((CandidateId->Bits.Bus == SourceId.Bits.Bus) &&
+        (CandidateId->Bits.Device == SourceId.Bits.Device) &&
+        (CandidateId->Bits.Function == SourceId.Bits.Function)) {
+      //
+      // Bus/Device/Function all match - found the record.
+      //
+      return Index;
+    }
+  }
+
+  return (UINTN)-1;
+}
+
+/**
+  Register PCI device to VTd engine.
+
+  If the engine's device scope is "include all", a device already registered
+  by a lower-indexed VTd unit is not registered again with this one.
+
+  @param[in] VtdIndex    The index of VTd engine.
+  @param[in] Segment     The segment of the source.
+  @param[in] SourceId    The SourceId of the source.
+  @param[in] DeviceType  The DMAR device scope type.
+  @param[in] CheckExist  TRUE: ERROR will be returned if the PCI device is already registered.
+                         FALSE: SUCCESS will be returned if the PCI device is registered.
+
+  @retval EFI_SUCCESS           The PCI device is registered.
+  @retval EFI_OUT_OF_RESOURCES  Not enough resources to register a new PCI device.
+  @retval EFI_ALREADY_STARTED   The device is already registered.
+**/
+EFI_STATUS
+RegisterPciDevice (
+  IN UINTN          VtdIndex,
+  IN UINT16         Segment,
+  IN VTD_SOURCE_ID  SourceId,
+  IN UINT8          DeviceType,
+  IN BOOLEAN        CheckExist
+  )
+{
+  PCI_DEVICE_INFORMATION            *PciDeviceInfo;
+  VTD_SOURCE_ID                     *PciSourceId;
+  UINTN                             PciDataIndex;
+  UINTN                             Index;
+  PCI_DEVICE_INFORMATION            *NewPciDeviceInfo;
+  EDKII_PLATFORM_VTD_PCI_DEVICE_ID  *PciDeviceId;
+
+  PciDeviceInfo = mVtdUnitInformation[VtdIndex].PciDeviceInfo;
+
+  if (PciDeviceInfo->IncludeAllFlag) {
+    //
+    // Do not register device in other VTD Unit
+    //
+    for (Index = 0; Index < VtdIndex; Index++) {
+      PciDataIndex = GetPciDataIndex (Index, Segment, SourceId);
+      if (PciDataIndex != (UINTN)-1) {
+        DEBUG ((DEBUG_INFO, " RegisterPciDevice: PCI S%04x B%02x D%02x F%02x already registered by Other Vtd(%d)\n", Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function, Index));
+        return EFI_SUCCESS;
+      }
+    }
+  }
+
+  PciDataIndex = GetPciDataIndex (VtdIndex, Segment, SourceId);
+  if (PciDataIndex == (UINTN)-1) {
+    //
+    // Register new
+    //
+
+    if (PciDeviceInfo->PciDeviceDataNumber >= PciDeviceInfo->PciDeviceDataMaxNumber) {
+      //
+      // Grow the device array by MAX_VTD_PCI_DATA_NUMBER entries.
+      // BUGFIX: only the OLD buffer size may be copied from the previous
+      // allocation; copying the new (larger) size would read past the end
+      // of the old buffer.
+      //
+      NewPciDeviceInfo = AllocateZeroPool (sizeof (PCI_DEVICE_INFORMATION) + sizeof (PCI_DEVICE_DATA) * (PciDeviceInfo->PciDeviceDataMaxNumber + MAX_VTD_PCI_DATA_NUMBER));
+      if (NewPciDeviceInfo == NULL) {
+        return EFI_OUT_OF_RESOURCES;
+      }
+
+      CopyMem (NewPciDeviceInfo, PciDeviceInfo, sizeof (PCI_DEVICE_INFORMATION) + sizeof (PCI_DEVICE_DATA) * PciDeviceInfo->PciDeviceDataMaxNumber);
+      FreePool (PciDeviceInfo);
+
+      NewPciDeviceInfo->PciDeviceDataMaxNumber += MAX_VTD_PCI_DATA_NUMBER;
+      mVtdUnitInformation[VtdIndex].PciDeviceInfo = NewPciDeviceInfo;
+      PciDeviceInfo = NewPciDeviceInfo;
+    }
+
+    ASSERT (PciDeviceInfo->PciDeviceDataNumber < PciDeviceInfo->PciDeviceDataMaxNumber);
+
+    PciSourceId = &PciDeviceInfo->PciDeviceData[PciDeviceInfo->PciDeviceDataNumber].PciSourceId;
+    PciSourceId->Bits.Bus      = SourceId.Bits.Bus;
+    PciSourceId->Bits.Device   = SourceId.Bits.Device;
+    PciSourceId->Bits.Function = SourceId.Bits.Function;
+
+    DEBUG ((DEBUG_INFO, " RegisterPciDevice: PCI S%04x B%02x D%02x F%02x", Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+
+    //
+    // Capture the hardware IDs for endpoints and PCI-PCI bridges so the
+    // device can be identified later by ID.
+    //
+    PciDeviceId = &PciDeviceInfo->PciDeviceData[PciDeviceInfo->PciDeviceDataNumber].PciDeviceId;
+    if ((DeviceType == EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_ENDPOINT) ||
+        (DeviceType == EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_BRIDGE)) {
+      PciDeviceId->VendorId   = PciSegmentRead16 (PCI_SEGMENT_LIB_ADDRESS(Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function, PCI_VENDOR_ID_OFFSET));
+      PciDeviceId->DeviceId   = PciSegmentRead16 (PCI_SEGMENT_LIB_ADDRESS(Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function, PCI_DEVICE_ID_OFFSET));
+      PciDeviceId->RevisionId = PciSegmentRead8 (PCI_SEGMENT_LIB_ADDRESS(Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function, PCI_REVISION_ID_OFFSET));
+
+      DEBUG ((DEBUG_INFO, " (%04x:%04x:%02x", PciDeviceId->VendorId, PciDeviceId->DeviceId, PciDeviceId->RevisionId));
+
+      if (DeviceType == EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_ENDPOINT) {
+        PciDeviceId->SubsystemVendorId = PciSegmentRead16 (PCI_SEGMENT_LIB_ADDRESS(Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function, PCI_SUBSYSTEM_VENDOR_ID_OFFSET));
+        PciDeviceId->SubsystemDeviceId = PciSegmentRead16 (PCI_SEGMENT_LIB_ADDRESS(Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function, PCI_SUBSYSTEM_ID_OFFSET));
+        DEBUG ((DEBUG_INFO, ":%04x:%04x", PciDeviceId->SubsystemVendorId, PciDeviceId->SubsystemDeviceId));
+      }
+      DEBUG ((DEBUG_INFO, ")"));
+    }
+
+    PciDeviceInfo->PciDeviceData[PciDeviceInfo->PciDeviceDataNumber].DeviceType = DeviceType;
+
+    if ((DeviceType != EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_ENDPOINT) &&
+        (DeviceType != EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_BRIDGE)) {
+      DEBUG ((DEBUG_INFO, " (*)"));
+    }
+    DEBUG ((DEBUG_INFO, "\n"));
+
+    PciDeviceInfo->PciDeviceDataNumber++;
+  } else {
+    if (CheckExist) {
+      DEBUG ((DEBUG_INFO, " RegisterPciDevice: PCI S%04x B%02x D%02x F%02x already registered\n", Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+      return EFI_ALREADY_STARTED;
+    }
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  The scan bus callback function to register PCI device.
+
+  @param[in] Context  The context of the callback (the VTd engine index).
+  @param[in] Segment  The segment of the source.
+  @param[in] Bus      The bus of the source.
+  @param[in] Device   The device of the source.
+  @param[in] Function The function of the source.
+
+  @retval EFI_SUCCESS The PCI device is registered.
+**/
+EFI_STATUS
+EFIAPI
+ScanBusCallbackRegisterPciDevice (
+  IN VOID    *Context,
+  IN UINT16  Segment,
+  IN UINT8   Bus,
+  IN UINT8   Device,
+  IN UINT8   Function
+  )
+{
+  VTD_SOURCE_ID  SourceId;
+  UINT8          BaseClass;
+  UINT8          SubClass;
+  UINT8          ScopeType;
+
+  SourceId.Bits.Bus      = Bus;
+  SourceId.Bits.Device   = Device;
+  SourceId.Bits.Function = Function;
+
+  //
+  // Default to endpoint scope; only a PCI-to-PCI bridge (base class bridge,
+  // subclass P2P) is reported with the bridge scope type.
+  //
+  ScopeType = EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_ENDPOINT;
+  BaseClass = PciSegmentRead8 (PCI_SEGMENT_LIB_ADDRESS(Segment, Bus, Device, Function, PCI_CLASSCODE_OFFSET + 2));
+  if (BaseClass == PCI_CLASS_BRIDGE) {
+    SubClass = PciSegmentRead8 (PCI_SEGMENT_LIB_ADDRESS(Segment, Bus, Device, Function, PCI_CLASSCODE_OFFSET + 1));
+    if (SubClass == PCI_CLASS_BRIDGE_P2P) {
+      ScopeType = EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_BRIDGE;
+    }
+  }
+
+  return RegisterPciDevice ((UINTN)Context, Segment, SourceId, ScopeType, FALSE);
+}
+
+/**
+  Scan PCI bus and invoke callback function for each PCI device under the bus.
+
+  Recursively descends into buses behind PCI-to-PCI bridges using the
+  bridge's secondary bus number.
+
+  @param[in] Context  The context of the callback function.
+  @param[in] Segment  The segment of the source.
+  @param[in] Bus      The bus of the source.
+  @param[in] Callback The callback function in PCI scan.
+
+  @retval EFI_SUCCESS The PCI devices under the bus are scanned.
+**/
+EFI_STATUS
+ScanPciBus (
+  IN VOID                         *Context,
+  IN UINT16                       Segment,
+  IN UINT8                        Bus,
+  IN SCAN_BUS_FUNC_CALLBACK_FUNC  Callback
+  )
+{
+  UINT8       Device;
+  UINT8       Function;
+  UINT8       SecondaryBusNumber;
+  UINT8       HeaderType;
+  UINT8       BaseClass;
+  UINT8       SubClass;
+  UINT16      VendorID;
+  UINT16      DeviceID;
+  EFI_STATUS  Status;
+
+  // Scan the PCI bus for devices
+  for (Device = 0; Device <= PCI_MAX_DEVICE; Device++) {
+    for (Function = 0; Function <= PCI_MAX_FUNC; Function++) {
+      //
+      // Vendor/Device ID of all-ones means no device at this B/D/F.
+      //
+      VendorID = PciSegmentRead16 (PCI_SEGMENT_LIB_ADDRESS(Segment, Bus, Device, Function, PCI_VENDOR_ID_OFFSET));
+      DeviceID = PciSegmentRead16 (PCI_SEGMENT_LIB_ADDRESS(Segment, Bus, Device, Function, PCI_DEVICE_ID_OFFSET));
+      if (VendorID == 0xFFFF && DeviceID == 0xFFFF) {
+        if (Function == 0) {
+          //
+          // If function 0 is not implemented, do not scan other functions.
+          //
+          break;
+        }
+        continue;
+      }
+
+      //
+      // Report this device to the caller; abort the scan on the first error.
+      //
+      Status = Callback (Context, Segment, Bus, Device, Function);
+      if (EFI_ERROR (Status)) {
+        return Status;
+      }
+
+      //
+      // Recurse into the secondary bus behind a PCI-to-PCI bridge.
+      //
+      BaseClass = PciSegmentRead8 (PCI_SEGMENT_LIB_ADDRESS(Segment, Bus, Device, Function, PCI_CLASSCODE_OFFSET + 2));
+      if (BaseClass == PCI_CLASS_BRIDGE) {
+        SubClass = PciSegmentRead8 (PCI_SEGMENT_LIB_ADDRESS(Segment, Bus, Device, Function, PCI_CLASSCODE_OFFSET + 1));
+        if (SubClass == PCI_CLASS_BRIDGE_P2P) {
+          SecondaryBusNumber = PciSegmentRead8 (PCI_SEGMENT_LIB_ADDRESS(Segment, Bus, Device, Function, PCI_BRIDGE_SECONDARY_BUS_REGISTER_OFFSET));
+          DEBUG ((DEBUG_INFO," ScanPciBus: PCI bridge S%04x B%02x D%02x F%02x (SecondBus:%02x)\n", Segment, Bus, Device, Function, SecondaryBusNumber));
+          if (SecondaryBusNumber != 0) {
+            Status = ScanPciBus (Context, Segment, SecondaryBusNumber, Callback);
+            if (EFI_ERROR (Status)) {
+              return Status;
+            }
+          }
+        }
+      }
+
+      if (Function == 0) {
+        HeaderType = PciSegmentRead8 (PCI_SEGMENT_LIB_ADDRESS(Segment, Bus, Device, 0, PCI_HEADER_TYPE_OFFSET));
+        if ((HeaderType & HEADER_TYPE_MULTI_FUNCTION) == 0x00) {
+          //
+          // It is not a multi-function device, do not scan other functions.
+          //
+          break;
+        }
+      }
+    }
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Scan PCI bus and invoke callback function for each PCI device under all root buses.
+
+  @param[in] Context  The context of the callback function.
+  @param[in] Segment  The segment of the source.
+                      NOTE(review): this parameter is not referenced; the root
+                      bridge's own SegmentNumber is used instead - confirm intent.
+  @param[in] Callback The callback function in PCI scan.
+
+  @retval EFI_SUCCESS The PCI devices under the bus are scanned.
+**/
+EFI_STATUS
+ScanAllPciBus (
+  IN VOID                         *Context,
+  IN UINT16                       Segment,
+  IN SCAN_BUS_FUNC_CALLBACK_FUNC  Callback
+  )
+{
+  EFI_STATUS                         Status;
+  UINTN                              Index;
+  UINTN                              HandleCount;
+  EFI_HANDLE                         *HandleBuffer;
+  EFI_PCI_ROOT_BRIDGE_IO_PROTOCOL    *PciRootBridgeIo;
+  EFI_ACPI_ADDRESS_SPACE_DESCRIPTOR  *Descriptors;
+
+  DEBUG ((DEBUG_INFO, "ScanAllPciBus ()\n"));
+
+  //
+  // NOTE(review): if LocateHandleBuffer fails, HandleCount/HandleBuffer remain
+  // uninitialized and only the ASSERT below catches it; confirm release-build
+  // (ASSERT disabled) behavior is acceptable.
+  //
+  Status = gBS->LocateHandleBuffer (
+                  ByProtocol,
+                  &gEfiPciRootBridgeIoProtocolGuid,
+                  NULL,
+                  &HandleCount,
+                  &HandleBuffer
+                  );
+  ASSERT_EFI_ERROR (Status);
+
+  DEBUG ((DEBUG_INFO,"Find %d root bridges\n", HandleCount));
+
+  for (Index = 0; Index < HandleCount; Index++) {
+    Status = gBS->HandleProtocol (
+                    HandleBuffer[Index],
+                    &gEfiPciRootBridgeIoProtocolGuid,
+                    (VOID **) &PciRootBridgeIo
+                    );
+    ASSERT_EFI_ERROR (Status);
+
+    Status = PciRootBridgeIo->Configuration (PciRootBridgeIo, (VOID **) &Descriptors);
+    ASSERT_EFI_ERROR (Status);
+
+    //
+    // Walk the resource descriptors to find this root bridge's bus range.
+    //
+    while (Descriptors->Desc != ACPI_END_TAG_DESCRIPTOR) {
+      if (Descriptors->ResType == ACPI_ADDRESS_SPACE_TYPE_BUS) {
+        break;
+      }
+      Descriptors++;
+    }
+
+    //
+    // Skip root bridges that report no bus number resource.
+    //
+    if (Descriptors->Desc == ACPI_END_TAG_DESCRIPTOR) {
+      continue;
+    }
+
+    DEBUG ((DEBUG_INFO,"Scan root bridges : %d, Segment : %d, Bus : 0x%02X\n", Index, PciRootBridgeIo->SegmentNumber, Descriptors->AddrRangeMin));
+    Status = ScanPciBus(Context, (UINT16) PciRootBridgeIo->SegmentNumber, (UINT8) Descriptors->AddrRangeMin, Callback);
+    if (EFI_ERROR (Status)) {
+      break;
+    }
+  }
+
+  FreePool(HandleBuffer);
+
+  return Status;
+}
+
+/**
+  Find the VTd index by the Segment and SourceId.
+
+  On success, exactly one of ExtContextEntry/ContextEntry is set non-NULL,
+  depending on whether the engine uses the extended root/context table.
+
+  @param[in]  Segment         The segment of the source.
+  @param[in]  SourceId        The SourceId of the source.
+  @param[out] ExtContextEntry The ExtContextEntry of the source.
+  @param[out] ContextEntry    The ContextEntry of the source.
+
+  @return The index of the VTd engine.
+  @retval (UINTN)-1 The VTd engine is not found.
+**/
+UINTN
+FindVtdIndexByPciDevice (
+  IN  UINT16                 Segment,
+  IN  VTD_SOURCE_ID          SourceId,
+  OUT VTD_EXT_CONTEXT_ENTRY  **ExtContextEntry,
+  OUT VTD_CONTEXT_ENTRY      **ContextEntry
+  )
+{
+  UINTN                  VtdIndex;
+  VTD_ROOT_ENTRY         *RootEntry;
+  VTD_CONTEXT_ENTRY      *ContextEntryTable;
+  VTD_CONTEXT_ENTRY      *ThisContextEntry;
+  VTD_EXT_ROOT_ENTRY     *ExtRootEntry;
+  VTD_EXT_CONTEXT_ENTRY  *ExtContextEntryTable;
+  VTD_EXT_CONTEXT_ENTRY  *ThisExtContextEntry;
+  UINTN                  PciDataIndex;
+
+  for (VtdIndex = 0; VtdIndex < mVtdUnitNumber; VtdIndex++) {
+    //
+    // Skip engines on a different PCI segment.
+    //
+    if (Segment != mVtdUnitInformation[VtdIndex].Segment) {
+      continue;
+    }
+
+    //
+    // The device must have been registered with this engine.
+    //
+    PciDataIndex = GetPciDataIndex (VtdIndex, Segment, SourceId);
+    if (PciDataIndex == (UINTN)-1) {
+      continue;
+    }
+
+//    DEBUG ((DEBUG_INFO,"FindVtdIndex(0x%x) for S%04x B%02x D%02x F%02x\n", VtdIndex, Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+
+    if (mVtdUnitInformation[VtdIndex].ExtRootEntryTable != 0) {
+      //
+      // Engine uses the extended root/context table format.
+      //
+      ExtRootEntry = &mVtdUnitInformation[VtdIndex].ExtRootEntryTable[SourceId.Index.RootIndex];
+      ExtContextEntryTable = (VTD_EXT_CONTEXT_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(ExtRootEntry->Bits.LowerContextTablePointerLo, ExtRootEntry->Bits.LowerContextTablePointerHi) ;
+      ThisExtContextEntry = &ExtContextEntryTable[SourceId.Index.ContextIndex];
+      //
+      // AddressWidth == 0 means the context entry was never set up for this
+      // device; keep looking in other engines.
+      //
+      if (ThisExtContextEntry->Bits.AddressWidth == 0) {
+        continue;
+      }
+      *ExtContextEntry = ThisExtContextEntry;
+      *ContextEntry = NULL;
+    } else {
+      RootEntry = &mVtdUnitInformation[VtdIndex].RootEntryTable[SourceId.Index.RootIndex];
+      ContextEntryTable = (VTD_CONTEXT_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(RootEntry->Bits.ContextTablePointerLo, RootEntry->Bits.ContextTablePointerHi) ;
+      ThisContextEntry = &ContextEntryTable[SourceId.Index.ContextIndex];
+      if (ThisContextEntry->Bits.AddressWidth == 0) {
+        continue;
+      }
+      *ExtContextEntry = NULL;
+      *ContextEntry = ThisContextEntry;
+    }
+
+    return VtdIndex;
+  }
+
+  return (UINTN)-1;
+}
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/TranslationTable.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/TranslationTable.c
new file mode 100644
index 000000000..37ca6e405
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/TranslationTable.c
@@ -0,0 +1,1112 @@
+/** @file
+
+ Copyright (c) 2017 - 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "DmaProtection.h"
+
+/**
+ Create extended context entry.
+
+ @param[in] VtdIndex The index of the VTd engine.
+
+ @retval EFI_SUCCESS The extended context entry is created.
+ @retval EFI_OUT_OF_RESOURCE No enough resource to create extended context entry.
+**/
+EFI_STATUS
+CreateExtContextEntry (
+ IN UINTN VtdIndex
+ );
+
+/**
+  Allocate zero pages.
+
+  @param[in] Pages  the number of pages.
+
+  @return the page address.
+  @retval NULL No resource to allocate pages.
+**/
+VOID *
+EFIAPI
+AllocateZeroPages (
+  IN UINTN  Pages
+  )
+{
+  VOID  *Address;
+
+  Address = AllocatePages (Pages);
+  if (Address != NULL) {
+    //
+    // AllocatePages does not clear memory; zero it explicitly.
+    //
+    ZeroMem (Address, EFI_PAGES_TO_SIZE (Pages));
+  }
+
+  return Address;
+}
+
+/**
+  Set second level paging entry attribute based upon IoMmuAccess.
+
+  @param[in] PtEntry     The paging entry.
+  @param[in] IoMmuAccess The IOMMU access.
+**/
+VOID
+SetSecondLevelPagingEntryAttribute (
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY  *PtEntry,
+  IN UINT64                         IoMmuAccess
+  )
+{
+  //
+  // Program the Read/Write permission bits from the requested access mask.
+  //
+  if ((IoMmuAccess & EDKII_IOMMU_ACCESS_READ) != 0) {
+    PtEntry->Bits.Read = 1;
+  } else {
+    PtEntry->Bits.Read = 0;
+  }
+
+  if ((IoMmuAccess & EDKII_IOMMU_ACCESS_WRITE) != 0) {
+    PtEntry->Bits.Write = 1;
+  } else {
+    PtEntry->Bits.Write = 0;
+  }
+}
+
+/**
+  Create context entry.
+
+  Builds the root entry table and per-bus context entry tables for one VTd
+  engine, covering every PCI device registered with that engine.
+
+  @param[in] VtdIndex  The index of the VTd engine.
+
+  @retval EFI_SUCCESS           The context entry is created.
+  @retval EFI_OUT_OF_RESOURCES  Not enough resources to create context entry.
+  @retval EFI_UNSUPPORTED       The reported page-table type is not supported.
+**/
+EFI_STATUS
+CreateContextEntry (
+  IN UINTN  VtdIndex
+  )
+{
+  UINTN              Index;
+  VOID               *Buffer;
+  UINTN              RootPages;
+  UINTN              ContextPages;
+  VTD_ROOT_ENTRY     *RootEntry;
+  VTD_CONTEXT_ENTRY  *ContextEntryTable;
+  VTD_CONTEXT_ENTRY  *ContextEntry;
+  VTD_SOURCE_ID      *PciSourceId;
+  VTD_SOURCE_ID      SourceId;
+  UINTN              MaxBusNumber;
+  UINTN              EntryTablePages;
+
+  //
+  // Find the highest bus number among the registered devices; one context
+  // table is needed per bus up to this number.
+  //
+  MaxBusNumber = 0;
+  for (Index = 0; Index < mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceDataNumber; Index++) {
+    PciSourceId = &mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceData[Index].PciSourceId;
+    if (PciSourceId->Bits.Bus > MaxBusNumber) {
+      MaxBusNumber = PciSourceId->Bits.Bus;
+    }
+  }
+  DEBUG ((DEBUG_INFO," MaxBusNumber - 0x%x\n", MaxBusNumber));
+
+  //
+  // Allocate the root entry table plus one context table per possible bus
+  // in a single contiguous allocation; context tables follow the root table.
+  //
+  RootPages = EFI_SIZE_TO_PAGES (sizeof (VTD_ROOT_ENTRY) * VTD_ROOT_ENTRY_NUMBER);
+  ContextPages = EFI_SIZE_TO_PAGES (sizeof (VTD_CONTEXT_ENTRY) * VTD_CONTEXT_ENTRY_NUMBER);
+  EntryTablePages = RootPages + ContextPages * (MaxBusNumber + 1);
+  Buffer = AllocateZeroPages (EntryTablePages);
+  if (Buffer == NULL) {
+    DEBUG ((DEBUG_INFO,"Could not Alloc Root Entry Table.. \n"));
+    return EFI_OUT_OF_RESOURCES;
+  }
+  mVtdUnitInformation[VtdIndex].RootEntryTable = (VTD_ROOT_ENTRY *)Buffer;
+  Buffer = (UINT8 *)Buffer + EFI_PAGES_TO_SIZE (RootPages);
+
+  for (Index = 0; Index < mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceDataNumber; Index++) {
+    PciSourceId = &mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceData[Index].PciSourceId;
+
+    SourceId.Bits.Bus = PciSourceId->Bits.Bus;
+    SourceId.Bits.Device = PciSourceId->Bits.Device;
+    SourceId.Bits.Function = PciSourceId->Bits.Function;
+
+    //
+    // Lazily attach the next context table to this bus's root entry on
+    // first use.
+    //
+    RootEntry = &mVtdUnitInformation[VtdIndex].RootEntryTable[SourceId.Index.RootIndex];
+    if (RootEntry->Bits.Present == 0) {
+      RootEntry->Bits.ContextTablePointerLo = (UINT32) RShiftU64 ((UINT64)(UINTN)Buffer, 12);
+      RootEntry->Bits.ContextTablePointerHi = (UINT32) RShiftU64 ((UINT64)(UINTN)Buffer, 32);
+      RootEntry->Bits.Present = 1;
+      Buffer = (UINT8 *)Buffer + EFI_PAGES_TO_SIZE (ContextPages);
+    }
+
+    ContextEntryTable = (VTD_CONTEXT_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(RootEntry->Bits.ContextTablePointerLo, RootEntry->Bits.ContextTablePointerHi) ;
+    ContextEntry = &ContextEntryTable[SourceId.Index.ContextIndex];
+    ContextEntry->Bits.TranslationType = 0;
+    ContextEntry->Bits.FaultProcessingDisable = 0;
+    ContextEntry->Bits.Present = 0;
+
+    DEBUG ((DEBUG_INFO,"Source: S%04x B%02x D%02x F%02x\n", mVtdUnitInformation[VtdIndex].Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+
+    //
+    // Select 5-level paging when SAGAW BIT3 is set, but fall back to 4-level
+    // when the platform host address width fits in 48 bits and 4-level
+    // (SAGAW BIT2) is also supported.
+    //
+    mVtdUnitInformation[VtdIndex].Is5LevelPaging = FALSE;
+    if ((mVtdUnitInformation[VtdIndex].CapReg.Bits.SAGAW & BIT3) != 0) {
+      mVtdUnitInformation[VtdIndex].Is5LevelPaging = TRUE;
+      if ((mAcpiDmarTable->HostAddressWidth <= 48) &&
+          ((mVtdUnitInformation[VtdIndex].CapReg.Bits.SAGAW & BIT2) != 0)) {
+        mVtdUnitInformation[VtdIndex].Is5LevelPaging = FALSE;
+      }
+    } else if ((mVtdUnitInformation[VtdIndex].CapReg.Bits.SAGAW & BIT2) == 0) {
+      //
+      // Neither 4-level (BIT2) nor 5-level (BIT3) is supported.
+      //
+      DEBUG((DEBUG_ERROR, "!!!! Page-table type is not supported on VTD %d !!!!\n", VtdIndex));
+      return EFI_UNSUPPORTED;
+    }
+
+    if (mVtdUnitInformation[VtdIndex].Is5LevelPaging) {
+      ContextEntry->Bits.AddressWidth = 0x3;
+      DEBUG((DEBUG_INFO, "Using 5-level page-table on VTD %d\n", VtdIndex));
+    } else {
+      ContextEntry->Bits.AddressWidth = 0x2;
+      DEBUG((DEBUG_INFO, "Using 4-level page-table on VTD %d\n", VtdIndex));
+    }
+  }
+
+  //
+  // Make the tables visible to the VTd hardware.
+  //
+  FlushPageTableMemory (VtdIndex, (UINTN)mVtdUnitInformation[VtdIndex].RootEntryTable, EFI_PAGES_TO_SIZE(EntryTablePages));
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Create second level paging entry table.
+
+  Maps [MemoryBase, MemoryLimit) using 2MB leaf pages, building (or extending)
+  a 4-level or 5-level second level page table.
+
+  @param[in] VtdIndex               The index of the VTd engine.
+  @param[in] SecondLevelPagingEntry The second level paging entry (NULL allocates a new top-level table).
+  @param[in] MemoryBase             The base of the memory.
+  @param[in] MemoryLimit            The limit of the memory.
+  @param[in] IoMmuAccess            The IOMMU access.
+  @param[in] Is5LevelPaging         If it is the 5 level paging.
+
+  @return The second level paging entry.
+**/
+VTD_SECOND_LEVEL_PAGING_ENTRY *
+CreateSecondLevelPagingEntryTable (
+  IN UINTN                          VtdIndex,
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY  *SecondLevelPagingEntry,
+  IN UINT64                         MemoryBase,
+  IN UINT64                         MemoryLimit,
+  IN UINT64                         IoMmuAccess,
+  IN BOOLEAN                        Is5LevelPaging
+  )
+{
+  UINTN                          Index5;
+  UINTN                          Index4;
+  UINTN                          Index3;
+  UINTN                          Index2;
+  UINTN                          Lvl5Start;
+  UINTN                          Lvl5End;
+  UINTN                          Lvl4PagesStart;
+  UINTN                          Lvl4PagesEnd;
+  UINTN                          Lvl4Start;
+  UINTN                          Lvl4End;
+  UINTN                          Lvl3Start;
+  UINTN                          Lvl3End;
+  VTD_SECOND_LEVEL_PAGING_ENTRY  *Lvl5PtEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY  *Lvl4PtEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY  *Lvl3PtEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY  *Lvl2PtEntry;
+  UINT64                         BaseAddress;
+  UINT64                         EndAddress;
+
+  if (MemoryLimit == 0) {
+    return NULL;
+  }
+
+  Lvl4PagesStart = 0;
+  Lvl4PagesEnd = 0;
+  Lvl4PtEntry = NULL;
+  Lvl5PtEntry = NULL;
+
+  //
+  // Expand the range to 2MB alignment since mapping is done with 2MB pages.
+  //
+  BaseAddress = ALIGN_VALUE_LOW(MemoryBase, SIZE_2MB);
+  EndAddress = ALIGN_VALUE_UP(MemoryLimit, SIZE_2MB);
+  DEBUG ((DEBUG_INFO,"CreateSecondLevelPagingEntryTable: BaseAddress - 0x%016lx, EndAddress - 0x%016lx\n", BaseAddress, EndAddress));
+
+  //
+  // Allocate the top-level table on first use.
+  //
+  if (SecondLevelPagingEntry == NULL) {
+    SecondLevelPagingEntry = AllocateZeroPages (1);
+    if (SecondLevelPagingEntry == NULL) {
+      DEBUG ((DEBUG_ERROR,"Could not Alloc LVL4 or LVL5 PT. \n"));
+      return NULL;
+    }
+    FlushPageTableMemory (VtdIndex, (UINTN)SecondLevelPagingEntry, EFI_PAGES_TO_SIZE(1));
+  }
+
+  //
+  // If no access is needed, just create not present entry.
+  //
+  if (IoMmuAccess == 0) {
+    return SecondLevelPagingEntry;
+  }
+
+  if (Is5LevelPaging) {
+    Lvl5Start = RShiftU64 (BaseAddress, 48) & 0x1FF;
+    Lvl5End = RShiftU64 (EndAddress - 1, 48) & 0x1FF;
+    DEBUG ((DEBUG_INFO," Lvl5Start - 0x%x, Lvl5End - 0x%x\n", Lvl5Start, Lvl5End));
+
+    Lvl4Start = RShiftU64 (BaseAddress, 39) & 0x1FF;
+    Lvl4End = RShiftU64 (EndAddress - 1, 39) & 0x1FF;
+
+    //
+    // Track LVL4 coverage as a flat (Lvl5 << 9 | Lvl4) page index so the
+    // loop below can walk LVL4 tables across LVL5 entry boundaries.
+    //
+    Lvl4PagesStart = (Lvl5Start<<9) | Lvl4Start;
+    Lvl4PagesEnd = (Lvl5End<<9) | Lvl4End;
+    DEBUG ((DEBUG_INFO," Lvl4PagesStart - 0x%x, Lvl4PagesEnd - 0x%x\n", Lvl4PagesStart, Lvl4PagesEnd));
+
+    Lvl5PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)SecondLevelPagingEntry;
+  } else {
+    //
+    // 4-level paging: force the outer LVL5 loop to run exactly once.
+    //
+    Lvl5Start = RShiftU64 (BaseAddress, 48) & 0x1FF;
+    Lvl5End = Lvl5Start;
+
+    Lvl4Start = RShiftU64 (BaseAddress, 39) & 0x1FF;
+    Lvl4End = RShiftU64 (EndAddress - 1, 39) & 0x1FF;
+    DEBUG ((DEBUG_INFO," Lvl4Start - 0x%x, Lvl4End - 0x%x\n", Lvl4Start, Lvl4End));
+
+    Lvl4PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)SecondLevelPagingEntry;
+  }
+
+  for (Index5 = Lvl5Start; Index5 <= Lvl5End; Index5++) {
+    if (Is5LevelPaging) {
+      //
+      // Allocate the LVL4 table behind this LVL5 entry on first touch.
+      //
+      if (Lvl5PtEntry[Index5].Uint64 == 0) {
+        Lvl5PtEntry[Index5].Uint64 = (UINT64)(UINTN)AllocateZeroPages (1);
+        if (Lvl5PtEntry[Index5].Uint64 == 0) {
+          DEBUG ((DEBUG_ERROR,"!!!!!! ALLOCATE LVL4 PAGE FAIL (0x%x)!!!!!!\n", Index5));
+          ASSERT(FALSE);
+          return NULL;
+        }
+        FlushPageTableMemory (VtdIndex, (UINTN)Lvl5PtEntry[Index5].Uint64, SIZE_4KB);
+        SetSecondLevelPagingEntryAttribute (&Lvl5PtEntry[Index5], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+      }
+      //
+      // Compute this LVL5 entry's LVL4 index window from the flat page index.
+      //
+      Lvl4Start = Lvl4PagesStart & 0x1FF;
+      if (((Index5+1)<<9) > Lvl4PagesEnd) {
+        Lvl4End = SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY) - 1;;
+        Lvl4PagesStart = (Index5+1)<<9;
+      } else {
+        Lvl4End = Lvl4PagesEnd & 0x1FF;
+      }
+      DEBUG ((DEBUG_INFO," Lvl5(0x%x): Lvl4Start - 0x%x, Lvl4End - 0x%x\n", Index5, Lvl4Start, Lvl4End));
+      Lvl4PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(Lvl5PtEntry[Index5].Bits.AddressLo, Lvl5PtEntry[Index5].Bits.AddressHi);
+    }
+
+    for (Index4 = Lvl4Start; Index4 <= Lvl4End; Index4++) {
+      if (Lvl4PtEntry[Index4].Uint64 == 0) {
+        Lvl4PtEntry[Index4].Uint64 = (UINT64)(UINTN)AllocateZeroPages (1);
+        if (Lvl4PtEntry[Index4].Uint64 == 0) {
+          DEBUG ((DEBUG_ERROR,"!!!!!! ALLOCATE LVL4 PAGE FAIL (0x%x)!!!!!!\n", Index4));
+          ASSERT(FALSE);
+          return NULL;
+        }
+        FlushPageTableMemory (VtdIndex, (UINTN)Lvl4PtEntry[Index4].Uint64, SIZE_4KB);
+        SetSecondLevelPagingEntryAttribute (&Lvl4PtEntry[Index4], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+      }
+
+      //
+      // Fill the whole LVL3 table when at least 1GB remains; otherwise stop
+      // at the entry covering EndAddress - 1.
+      //
+      Lvl3Start = RShiftU64 (BaseAddress, 30) & 0x1FF;
+      if (ALIGN_VALUE_LOW(BaseAddress + SIZE_1GB, SIZE_1GB) <= EndAddress) {
+        Lvl3End = SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY) - 1;
+      } else {
+        Lvl3End = RShiftU64 (EndAddress - 1, 30) & 0x1FF;
+      }
+      DEBUG ((DEBUG_INFO," Lvl4(0x%x): Lvl3Start - 0x%x, Lvl3End - 0x%x\n", Index4, Lvl3Start, Lvl3End));
+
+      Lvl3PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(Lvl4PtEntry[Index4].Bits.AddressLo, Lvl4PtEntry[Index4].Bits.AddressHi);
+      for (Index3 = Lvl3Start; Index3 <= Lvl3End; Index3++) {
+        if (Lvl3PtEntry[Index3].Uint64 == 0) {
+          Lvl3PtEntry[Index3].Uint64 = (UINT64)(UINTN)AllocateZeroPages (1);
+          if (Lvl3PtEntry[Index3].Uint64 == 0) {
+            DEBUG ((DEBUG_ERROR,"!!!!!! ALLOCATE LVL3 PAGE FAIL (0x%x, 0x%x)!!!!!!\n", Index4, Index3));
+            ASSERT(FALSE);
+            return NULL;
+          }
+          FlushPageTableMemory (VtdIndex, (UINTN)Lvl3PtEntry[Index3].Uint64, SIZE_4KB);
+          SetSecondLevelPagingEntryAttribute (&Lvl3PtEntry[Index3], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+        }
+
+        //
+        // Populate the LVL2 table with 2MB leaf pages until MemoryLimit.
+        //
+        Lvl2PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(Lvl3PtEntry[Index3].Bits.AddressLo, Lvl3PtEntry[Index3].Bits.AddressHi);
+        for (Index2 = 0; Index2 < SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY); Index2++) {
+          Lvl2PtEntry[Index2].Uint64 = BaseAddress;
+          SetSecondLevelPagingEntryAttribute (&Lvl2PtEntry[Index2], IoMmuAccess);
+          Lvl2PtEntry[Index2].Bits.PageSize = 1;
+          BaseAddress += SIZE_2MB;
+          if (BaseAddress >= MemoryLimit) {
+            break;
+          }
+        }
+        FlushPageTableMemory (VtdIndex, (UINTN)Lvl2PtEntry, SIZE_4KB);
+        if (BaseAddress >= MemoryLimit) {
+          break;
+        }
+      }
+      FlushPageTableMemory (VtdIndex, (UINTN)&Lvl3PtEntry[Lvl3Start], (UINTN)&Lvl3PtEntry[Lvl3End + 1] - (UINTN)&Lvl3PtEntry[Lvl3Start]);
+      if (BaseAddress >= MemoryLimit) {
+        break;
+      }
+    }
+    FlushPageTableMemory (VtdIndex, (UINTN)&Lvl4PtEntry[Lvl4Start], (UINTN)&Lvl4PtEntry[Lvl4End + 1] - (UINTN)&Lvl4PtEntry[Lvl4Start]);
+  }
+  //
+  // NOTE(review): in the 4-level case Lvl5PtEntry is still NULL here, so this
+  // flush targets a near-zero address range - confirm this is intended.
+  //
+  FlushPageTableMemory (VtdIndex, (UINTN)&Lvl5PtEntry[Lvl5Start], (UINTN)&Lvl5PtEntry[Lvl5End + 1] - (UINTN)&Lvl5PtEntry[Lvl5Start]);
+
+  return SecondLevelPagingEntry;
+}
+
+/**
+  Create second level paging entry.
+
+  @param[in] VtdIndex       The index of the VTd engine.
+  @param[in] IoMmuAccess    The IOMMU access.
+  @param[in] Is5LevelPaging If it is the 5 level paging.
+
+  @return The second level paging entry.
+**/
+VTD_SECOND_LEVEL_PAGING_ENTRY *
+CreateSecondLevelPagingEntry (
+  IN UINTN    VtdIndex,
+  IN UINT64   IoMmuAccess,
+  IN BOOLEAN  Is5LevelPaging
+  )
+{
+  VTD_SECOND_LEVEL_PAGING_ENTRY  *PagingEntry;
+
+  //
+  // Map all memory below 4GB first.
+  //
+  PagingEntry = CreateSecondLevelPagingEntryTable (VtdIndex, NULL, 0, mBelow4GMemoryLimit, IoMmuAccess, Is5LevelPaging);
+  if (PagingEntry == NULL) {
+    return NULL;
+  }
+
+  //
+  // Extend the same table to cover memory above 4GB, when present.
+  //
+  if (mAbove4GMemoryLimit != 0) {
+    ASSERT (mAbove4GMemoryLimit > BASE_4GB);
+    PagingEntry = CreateSecondLevelPagingEntryTable (VtdIndex, PagingEntry, SIZE_4GB, mAbove4GMemoryLimit, IoMmuAccess, Is5LevelPaging);
+    if (PagingEntry == NULL) {
+      return NULL;
+    }
+  }
+
+  return PagingEntry;
+}
+
+/**
+  Setup VTd translation table.
+
+  Creates the context entries (legacy or extended format) for every VTd
+  engine, chosen from the engine's extended capability register bits.
+
+  @retval EFI_SUCCESS           Setup translation table successfully.
+  @retval EFI_OUT_OF_RESOURCES  Setup translation table fail.
+  @retval EFI_UNSUPPORTED       An engine reports an unsupported configuration.
+**/
+EFI_STATUS
+SetupTranslationTable (
+  VOID
+  )
+{
+  EFI_STATUS  Status;
+  UINTN       Index;
+
+  for (Index = 0; Index < mVtdUnitNumber; Index++) {
+    DEBUG((DEBUG_INFO, "CreateContextEntry - %d\n", Index));
+
+    if (mVtdUnitInformation[Index].ECapReg.Bits.SMTS) {
+      //
+      // When SMTS is set, ECap bit 24 (DEP_24) must be zero.
+      //
+      if (mVtdUnitInformation[Index].ECapReg.Bits.DEP_24) {
+        DEBUG ((DEBUG_ERROR,"ECapReg.bit24 is not zero\n"));
+        ASSERT(FALSE);
+        Status = EFI_UNSUPPORTED;
+      } else {
+        Status = CreateContextEntry (Index);
+      }
+    } else {
+      if (mVtdUnitInformation[Index].ECapReg.Bits.DEP_24) {
+        //
+        // To be compatible with previous VTd engines:
+        // bit 24 was the ECS (Extended Context Support) bit.
+        //
+        Status = CreateExtContextEntry (Index);
+      } else {
+        Status = CreateContextEntry (Index);
+      }
+    }
+
+    //
+    // Abort on the first engine that fails.
+    //
+    if (EFI_ERROR (Status)) {
+      return Status;
+    }
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Dump DMAR second level paging entry.
+
+  Walks the second level page table rooted at SecondLevelPagingEntry and
+  prints every non-zero entry at DEBUG_VERBOSE level. With 4-level paging the
+  root is treated as the level-4 table (the level-5 loop runs exactly once).
+  A level-2 entry with PageSize set maps a 2MB page directly and therefore
+  has no level-1 table beneath it.
+
+  @param[in]  SecondLevelPagingEntry The second level paging entry.
+  @param[in]  Is5LevelPaging         If it is the 5 level paging.
+**/
+VOID
+DumpSecondLevelPagingEntry (
+  IN VOID    *SecondLevelPagingEntry,
+  IN BOOLEAN Is5LevelPaging
+  )
+{
+  UINTN Index5;
+  UINTN Index4;
+  UINTN Index3;
+  UINTN Index2;
+  UINTN Index1;
+  UINTN Lvl5IndexEnd;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl5PtEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl4PtEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl3PtEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl2PtEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl1PtEntry;
+
+  DEBUG ((DEBUG_VERBOSE,"================\n"));
+  DEBUG ((DEBUG_VERBOSE,"DMAR Second Level Page Table:\n"));
+  DEBUG ((DEBUG_VERBOSE,"SecondLevelPagingEntry Base - 0x%x, Is5LevelPaging - %d\n", SecondLevelPagingEntry, Is5LevelPaging));
+
+  //
+  // Each 4KB table holds 512 entries; the level-5 loop collapses to a single
+  // pass when 4-level paging is in use.
+  //
+  Lvl5IndexEnd = Is5LevelPaging ? SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY) : 1;
+  Lvl4PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)SecondLevelPagingEntry;
+  Lvl5PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)SecondLevelPagingEntry;
+
+  for (Index5 = 0; Index5 < Lvl5IndexEnd; Index5++) {
+    if (Is5LevelPaging) {
+      if (Lvl5PtEntry[Index5].Uint64 != 0) {
+        DEBUG ((DEBUG_VERBOSE,"  Lvl5Pt Entry(0x%03x) - 0x%016lx\n", Index5, Lvl5PtEntry[Index5].Uint64));
+      }
+      if (Lvl5PtEntry[Index5].Uint64 == 0) {
+        continue;
+      }
+      Lvl4PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(Lvl5PtEntry[Index5].Bits.AddressLo, Lvl5PtEntry[Index5].Bits.AddressHi);
+    }
+
+    for (Index4 = 0; Index4 < SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY); Index4++) {
+      if (Lvl4PtEntry[Index4].Uint64 != 0) {
+        DEBUG ((DEBUG_VERBOSE,"  Lvl4Pt Entry(0x%03x) - 0x%016lx\n", Index4, Lvl4PtEntry[Index4].Uint64));
+      }
+      if (Lvl4PtEntry[Index4].Uint64 == 0) {
+        continue;
+      }
+      Lvl3PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(Lvl4PtEntry[Index4].Bits.AddressLo, Lvl4PtEntry[Index4].Bits.AddressHi);
+      for (Index3 = 0; Index3 < SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY); Index3++) {
+        if (Lvl3PtEntry[Index3].Uint64 != 0) {
+          DEBUG ((DEBUG_VERBOSE,"   Lvl3Pt Entry(0x%03x) - 0x%016lx\n", Index3, Lvl3PtEntry[Index3].Uint64));
+        }
+        if (Lvl3PtEntry[Index3].Uint64 == 0) {
+          continue;
+        }
+
+        Lvl2PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(Lvl3PtEntry[Index3].Bits.AddressLo, Lvl3PtEntry[Index3].Bits.AddressHi);
+        for (Index2 = 0; Index2 < SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY); Index2++) {
+          if (Lvl2PtEntry[Index2].Uint64 != 0) {
+            DEBUG ((DEBUG_VERBOSE,"    Lvl2Pt Entry(0x%03x) - 0x%016lx\n", Index2, Lvl2PtEntry[Index2].Uint64));
+          }
+          if (Lvl2PtEntry[Index2].Uint64 == 0) {
+            continue;
+          }
+          //
+          // PageSize == 0 means this entry points to a level-1 table;
+          // PageSize == 1 would be a 2MB leaf page with no level below.
+          //
+          if (Lvl2PtEntry[Index2].Bits.PageSize == 0) {
+            Lvl1PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(Lvl2PtEntry[Index2].Bits.AddressLo, Lvl2PtEntry[Index2].Bits.AddressHi);
+            for (Index1 = 0; Index1 < SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY); Index1++) {
+              if (Lvl1PtEntry[Index1].Uint64 != 0) {
+                DEBUG ((DEBUG_VERBOSE,"      Lvl1Pt Entry(0x%03x) - 0x%016lx\n", Index1, Lvl1PtEntry[Index1].Uint64));
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+  DEBUG ((DEBUG_VERBOSE,"================\n"));
+}
+
+/**
+  Invalidate stale IOTLB entries for one VTd engine.
+
+  Issues a global IOTLB invalidation only when cached context or page table
+  state has actually been modified, then clears both dirty flags.
+
+  @param VtdIndex The VTd engine index.
+**/
+VOID
+InvalidatePageEntry (
+  IN UINTN VtdIndex
+  )
+{
+  VTD_UNIT_INFORMATION  *VtdUnitInfo;
+
+  VtdUnitInfo = &mVtdUnitInformation[VtdIndex];
+
+  if (VtdUnitInfo->HasDirtyContext || VtdUnitInfo->HasDirtyPages) {
+    InvalidateVtdIOTLBGlobal (VtdIndex);
+  }
+
+  VtdUnitInfo->HasDirtyContext = FALSE;
+  VtdUnitInfo->HasDirtyPages   = FALSE;
+}
+
+//
+// Second level paging entry bit definitions.
+//
+#define VTD_PG_R                   BIT0          // Read permission
+#define VTD_PG_W                   BIT1          // Write permission
+#define VTD_PG_X                   BIT2          // Execute permission
+#define VTD_PG_EMT                 (BIT3 | BIT4 | BIT5)  // Extended memory type field
+#define VTD_PG_TM                  (BIT62)       // Transient mapping
+
+#define VTD_PG_PS                  BIT7          // Page size: set on a 2MB/1GB leaf entry
+
+//
+// Attribute bits that are copied from a large page entry into each of its
+// children when the page is split (see SplitSecondLevelPage).
+//
+#define PAGE_PROGATE_BITS          (VTD_PG_TM | VTD_PG_EMT | VTD_PG_W | VTD_PG_R)
+
+#define PAGING_4K_MASK             0xFFF
+#define PAGING_2M_MASK             0x1FFFFF
+#define PAGING_1G_MASK             0x3FFFFFFF
+
+// Each paging level indexes 512 entries (9 bits).
+#define PAGING_VTD_INDEX_MASK      0x1FF
+
+#define PAGING_4K_ADDRESS_MASK_64  0x000FFFFFFFFFF000ull
+#define PAGING_2M_ADDRESS_MASK_64  0x000FFFFFFFE00000ull
+#define PAGING_1G_ADDRESS_MASK_64  0x000FFFFFC0000000ull
+
+//
+// Supported page mapping granularities.
+//
+typedef enum {
+  PageNone,
+  Page4K,
+  Page2M,
+  Page1G,
+} PAGE_ATTRIBUTE;
+
+typedef struct {
+  PAGE_ATTRIBUTE Attribute;    // Mapping granularity
+  UINT64         Length;       // Bytes covered by one entry at this granularity
+  UINT64         AddressMask;  // Valid address bits for this granularity
+} PAGE_ATTRIBUTE_TABLE;
+
+PAGE_ATTRIBUTE_TABLE mPageAttributeTable[] = {
+  {Page4K,  SIZE_4KB,  PAGING_4K_ADDRESS_MASK_64},
+  {Page2M,  SIZE_2MB,  PAGING_2M_ADDRESS_MASK_64},
+  {Page1G,  SIZE_1GB,  PAGING_1G_ADDRESS_MASK_64},
+};
+
+/**
+  Return length according to page attributes.
+
+  @param[in] PageAttribute      The page attribute of the page entry.
+
+  @return The length in bytes covered by one page entry of this attribute,
+          or 0 if the attribute is unknown (e.g. PageNone).
+**/
+UINTN
+PageAttributeToLength (
+  IN PAGE_ATTRIBUTE  PageAttribute
+  )
+{
+  UINTN Index;
+
+  for (Index = 0; Index < ARRAY_SIZE (mPageAttributeTable); Index++) {
+    if (PageAttribute == mPageAttributeTable[Index].Attribute) {
+      return (UINTN)mPageAttributeTable[Index].Length;
+    }
+  }
+
+  return 0;
+}
+
+/**
+  Return page table entry to match the address.
+
+  Walks (creating on demand) the second level page table down to the entry
+  that covers Address. Missing L5/L4/L3 directory levels are allocated,
+  flushed, and granted R/W; a missing L2 entry is populated as a 2MB leaf.
+
+  @param[in]  VtdIndex                The index used to identify a VTd engine.
+  @param[in]  SecondLevelPagingEntry  The second level paging entry in VTd table for the device.
+  @param[in]  Address                 The address to be checked.
+  @param[in]  Is5LevelPaging          If it is the 5 level paging.
+  @param[out] PageAttribute           The page attribute of the page entry.
+
+  @return The page entry, or NULL on allocation failure or unmapped 4KB entry.
+**/
+VOID *
+GetSecondLevelPageTableEntry (
+  IN  UINTN                          VtdIndex,
+  IN  VTD_SECOND_LEVEL_PAGING_ENTRY  *SecondLevelPagingEntry,
+  IN  PHYSICAL_ADDRESS               Address,
+  IN  BOOLEAN                        Is5LevelPaging,
+  OUT PAGE_ATTRIBUTE                 *PageAttribute
+  )
+{
+  UINTN  Index1;
+  UINTN  Index2;
+  UINTN  Index3;
+  UINTN  Index4;
+  UINTN  Index5;
+  UINT64 *L1PageTable;
+  UINT64 *L2PageTable;
+  UINT64 *L3PageTable;
+  UINT64 *L4PageTable;
+  UINT64 *L5PageTable;
+
+  //
+  // Use 64-bit shifts for every level so addresses above 4GB are decoded
+  // correctly on IA32 builds, where a (UINTN)Address shift would truncate
+  // the upper 32 bits before the shift.
+  //
+  Index5 = ((UINTN)RShiftU64 (Address, 48)) & PAGING_VTD_INDEX_MASK;
+  Index4 = ((UINTN)RShiftU64 (Address, 39)) & PAGING_VTD_INDEX_MASK;
+  Index3 = ((UINTN)RShiftU64 (Address, 30)) & PAGING_VTD_INDEX_MASK;
+  Index2 = ((UINTN)RShiftU64 (Address, 21)) & PAGING_VTD_INDEX_MASK;
+  Index1 = ((UINTN)RShiftU64 (Address, 12)) & PAGING_VTD_INDEX_MASK;
+
+  if (Is5LevelPaging) {
+    L5PageTable = (UINT64 *)SecondLevelPagingEntry;
+    if (L5PageTable[Index5] == 0) {
+      L5PageTable[Index5] = (UINT64)(UINTN)AllocateZeroPages (1);
+      if (L5PageTable[Index5] == 0) {
+        DEBUG ((DEBUG_ERROR,"!!!!!! ALLOCATE LVL5 PAGE FAIL (0x%x)!!!!!!\n", Index5));
+        ASSERT(FALSE);
+        *PageAttribute = PageNone;
+        return NULL;
+      }
+      FlushPageTableMemory (VtdIndex, (UINTN)L5PageTable[Index5], SIZE_4KB);
+      SetSecondLevelPagingEntryAttribute ((VTD_SECOND_LEVEL_PAGING_ENTRY *)&L5PageTable[Index5], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+      FlushPageTableMemory (VtdIndex, (UINTN)&L5PageTable[Index5], sizeof(L5PageTable[Index5]));
+    }
+    L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & PAGING_4K_ADDRESS_MASK_64);
+  } else {
+    L4PageTable = (UINT64 *)SecondLevelPagingEntry;
+  }
+
+  if (L4PageTable[Index4] == 0) {
+    L4PageTable[Index4] = (UINT64)(UINTN)AllocateZeroPages (1);
+    if (L4PageTable[Index4] == 0) {
+      DEBUG ((DEBUG_ERROR,"!!!!!! ALLOCATE LVL4 PAGE FAIL (0x%x)!!!!!!\n", Index4));
+      ASSERT(FALSE);
+      *PageAttribute = PageNone;
+      return NULL;
+    }
+    FlushPageTableMemory (VtdIndex, (UINTN)L4PageTable[Index4], SIZE_4KB);
+    SetSecondLevelPagingEntryAttribute ((VTD_SECOND_LEVEL_PAGING_ENTRY *)&L4PageTable[Index4], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+    FlushPageTableMemory (VtdIndex, (UINTN)&L4PageTable[Index4], sizeof(L4PageTable[Index4]));
+  }
+
+  L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & PAGING_4K_ADDRESS_MASK_64);
+  if (L3PageTable[Index3] == 0) {
+    L3PageTable[Index3] = (UINT64)(UINTN)AllocateZeroPages (1);
+    if (L3PageTable[Index3] == 0) {
+      DEBUG ((DEBUG_ERROR,"!!!!!! ALLOCATE LVL3 PAGE FAIL (0x%x, 0x%x)!!!!!!\n", Index4, Index3));
+      ASSERT(FALSE);
+      *PageAttribute = PageNone;
+      return NULL;
+    }
+    FlushPageTableMemory (VtdIndex, (UINTN)L3PageTable[Index3], SIZE_4KB);
+    SetSecondLevelPagingEntryAttribute ((VTD_SECOND_LEVEL_PAGING_ENTRY *)&L3PageTable[Index3], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+    FlushPageTableMemory (VtdIndex, (UINTN)&L3PageTable[Index3], sizeof(L3PageTable[Index3]));
+  }
+  if ((L3PageTable[Index3] & VTD_PG_PS) != 0) {
+    // 1G leaf page
+    *PageAttribute = Page1G;
+    return &L3PageTable[Index3];
+  }
+
+  L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & PAGING_4K_ADDRESS_MASK_64);
+  if (L2PageTable[Index2] == 0) {
+    //
+    // Create an identity-mapped 2MB leaf (no access rights yet).
+    //
+    L2PageTable[Index2] = Address & PAGING_2M_ADDRESS_MASK_64;
+    SetSecondLevelPagingEntryAttribute ((VTD_SECOND_LEVEL_PAGING_ENTRY *)&L2PageTable[Index2], 0);
+    L2PageTable[Index2] |= VTD_PG_PS;
+    FlushPageTableMemory (VtdIndex, (UINTN)&L2PageTable[Index2], sizeof(L2PageTable[Index2]));
+  }
+  if ((L2PageTable[Index2] & VTD_PG_PS) != 0) {
+    // 2M leaf page
+    *PageAttribute = Page2M;
+    return &L2PageTable[Index2];
+  }
+
+  // 4k
+  L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & PAGING_4K_ADDRESS_MASK_64);
+  if ((L1PageTable[Index1] == 0) && (Address != 0)) {
+    *PageAttribute = PageNone;
+    return NULL;
+  }
+  *PageAttribute = Page4K;
+  return &L1PageTable[Index1];
+}
+
+/**
+  Modify memory attributes of page entry.
+
+  Applies IoMmuAccess to the entry, flushes it to memory, and reports whether
+  the stored value actually changed.
+
+  @param[in]  VtdIndex      The index used to identify a VTd engine.
+  @param[in]  PageEntry     The page entry.
+  @param[in]  IoMmuAccess   The IOMMU access.
+  @param[out] IsModified    TRUE means page table modified. FALSE means page table not modified.
+**/
+VOID
+ConvertSecondLevelPageEntryAttribute (
+  IN UINTN                          VtdIndex,
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY  *PageEntry,
+  IN UINT64                         IoMmuAccess,
+  OUT BOOLEAN                       *IsModified
+  )
+{
+  UINT64  OriginalEntry;
+
+  OriginalEntry = PageEntry->Uint64;
+
+  SetSecondLevelPagingEntryAttribute (PageEntry, IoMmuAccess);
+  FlushPageTableMemory (VtdIndex, (UINTN)PageEntry, sizeof(*PageEntry));
+
+  *IsModified = (BOOLEAN) (OriginalEntry != PageEntry->Uint64);
+  if (*IsModified) {
+    DEBUG ((DEBUG_VERBOSE, "ConvertSecondLevelPageEntryAttribute 0x%lx", OriginalEntry));
+    DEBUG ((DEBUG_VERBOSE, "->0x%lx\n", PageEntry->Uint64));
+  }
+}
+
+/**
+  This function returns if there is need to split page entry.
+
+  @param[in] BaseAddress      The base address to be checked.
+  @param[in] Length           The length to be checked.
+  @param[in] PageAttribute    The page attribute of the page entry.
+
+  @retval SplitAttributes on if there is need to split page entry.
+**/
+PAGE_ATTRIBUTE
+NeedSplitPage (
+  IN PHYSICAL_ADDRESS BaseAddress,
+  IN UINT64           Length,
+  IN PAGE_ATTRIBUTE   PageAttribute
+  )
+{
+  UINT64  EntrySize;
+
+  EntrySize = PageAttributeToLength (PageAttribute);
+
+  //
+  // Range starts on an entry boundary and fully covers it: no split needed.
+  //
+  if ((Length >= EntrySize) && ((BaseAddress & (EntrySize - 1)) == 0)) {
+    return PageNone;
+  }
+
+  //
+  // A 2MB granule cannot fit: split down to 4KB.
+  //
+  if ((Length < SIZE_2MB) || ((BaseAddress & PAGING_2M_MASK) != 0)) {
+    return Page4K;
+  }
+
+  return Page2M;
+}
+
+/**
+  This function splits one page entry to small page entries.
+
+  A 2MB leaf is split into a table of 512 4KB entries; a 1GB leaf is split
+  into a table of 512 2MB entries. The children inherit the parent's
+  PAGE_PROGATE_BITS, the new table is flushed, and the parent entry is
+  rewritten to point at it with R/W access.
+
+  @param[in] VtdIndex         The index used to identify a VTd engine.
+  @param[in] PageEntry        The page entry to be split.
+  @param[in] PageAttribute    The page attribute of the page entry.
+  @param[in] SplitAttribute   How to split the page entry.
+
+  @retval RETURN_SUCCESS            The page entry is split.
+  @retval RETURN_UNSUPPORTED        The page entry does not support to be split.
+  @retval RETURN_OUT_OF_RESOURCES   No resource to split page entry.
+**/
+RETURN_STATUS
+SplitSecondLevelPage (
+  IN UINTN                          VtdIndex,
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY  *PageEntry,
+  IN PAGE_ATTRIBUTE                 PageAttribute,
+  IN PAGE_ATTRIBUTE                 SplitAttribute
+  )
+{
+  UINT64   BaseAddress;
+  UINT64   *NewPageEntry;
+  UINTN    Index;
+
+  ASSERT (PageAttribute == Page2M || PageAttribute == Page1G);
+
+  if (PageAttribute == Page2M) {
+    //
+    // Split 2M to 4K
+    //
+    ASSERT (SplitAttribute == Page4K);
+    if (SplitAttribute == Page4K) {
+      NewPageEntry = AllocateZeroPages (1);
+      DEBUG ((DEBUG_VERBOSE, "Split - 0x%x\n", NewPageEntry));
+      if (NewPageEntry == NULL) {
+        return RETURN_OUT_OF_RESOURCES;
+      }
+      //
+      // Fill all 512 children before flushing, then repoint the parent;
+      // the ordering ensures the IOMMU never sees a half-built table.
+      //
+      BaseAddress = PageEntry->Uint64 & PAGING_2M_ADDRESS_MASK_64;
+      for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
+        NewPageEntry[Index] = (BaseAddress + SIZE_4KB * Index) | (PageEntry->Uint64 & PAGE_PROGATE_BITS);
+      }
+      FlushPageTableMemory (VtdIndex, (UINTN)NewPageEntry, SIZE_4KB);
+
+      PageEntry->Uint64 = (UINT64)(UINTN)NewPageEntry;
+      SetSecondLevelPagingEntryAttribute (PageEntry, EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+      FlushPageTableMemory (VtdIndex, (UINTN)PageEntry, sizeof(*PageEntry));
+      return RETURN_SUCCESS;
+    } else {
+      return RETURN_UNSUPPORTED;
+    }
+  } else if (PageAttribute == Page1G) {
+    //
+    // Split 1G to 2M
+    // No need support 1G->4K directly, we should use 1G->2M, then 2M->4K to get more compact page table.
+    //
+    ASSERT (SplitAttribute == Page2M || SplitAttribute == Page4K);
+    if ((SplitAttribute == Page2M || SplitAttribute == Page4K)) {
+      NewPageEntry = AllocateZeroPages (1);
+      DEBUG ((DEBUG_VERBOSE, "Split - 0x%x\n", NewPageEntry));
+      if (NewPageEntry == NULL) {
+        return RETURN_OUT_OF_RESOURCES;
+      }
+      //
+      // Children are 2MB leaves themselves, so VTD_PG_PS is set on each.
+      //
+      BaseAddress = PageEntry->Uint64 & PAGING_1G_ADDRESS_MASK_64;
+      for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
+        NewPageEntry[Index] = (BaseAddress + SIZE_2MB * Index) | VTD_PG_PS | (PageEntry->Uint64 & PAGE_PROGATE_BITS);
+      }
+      FlushPageTableMemory (VtdIndex, (UINTN)NewPageEntry, SIZE_4KB);
+
+      PageEntry->Uint64 = (UINT64)(UINTN)NewPageEntry;
+      SetSecondLevelPagingEntryAttribute (PageEntry, EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+      FlushPageTableMemory (VtdIndex, (UINTN)PageEntry, sizeof(*PageEntry));
+      return RETURN_SUCCESS;
+    } else {
+      return RETURN_UNSUPPORTED;
+    }
+  } else {
+    return RETURN_UNSUPPORTED;
+  }
+}
+
+/**
+  Set VTd attribute for a system memory on second level page entry
+
+  Applies IoMmuAccess across [BaseAddress, BaseAddress + Length), splitting
+  large pages as needed so that every sub-range gets exactly the requested
+  access. Marks the engine's pages dirty when any entry actually changes.
+
+  @param[in]  VtdIndex                The index used to identify a VTd engine.
+  @param[in]  DomainIdentifier        The domain ID of the source.
+  @param[in]  SecondLevelPagingEntry  The second level paging entry in VTd table for the device.
+  @param[in]  BaseAddress             The base of device memory address to be used as the DMA memory.
+  @param[in]  Length                  The length of device memory address to be used as the DMA memory.
+  @param[in]  IoMmuAccess             The IOMMU access.
+
+  @retval EFI_SUCCESS      The IoMmuAccess is set for the memory range specified by BaseAddress and Length.
+                           (Length == 0 is treated as a no-op success.)
+  @retval EFI_UNSUPPORTED  BaseAddress or Length is not 4KB aligned, or the
+                           page table walk / page split failed for the range.
+**/
+EFI_STATUS
+SetSecondLevelPagingAttribute (
+  IN UINTN                          VtdIndex,
+  IN UINT16                         DomainIdentifier,
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY  *SecondLevelPagingEntry,
+  IN UINT64                         BaseAddress,
+  IN UINT64                         Length,
+  IN UINT64                         IoMmuAccess
+  )
+{
+  VTD_SECOND_LEVEL_PAGING_ENTRY *PageEntry;
+  PAGE_ATTRIBUTE                PageAttribute;
+  UINTN                         PageEntryLength;
+  PAGE_ATTRIBUTE                SplitAttribute;
+  EFI_STATUS                    Status;
+  BOOLEAN                       IsEntryModified;
+
+  DEBUG ((DEBUG_VERBOSE,"SetSecondLevelPagingAttribute (%d) (0x%016lx - 0x%016lx : %x) \n", VtdIndex, BaseAddress, Length, IoMmuAccess));
+  DEBUG ((DEBUG_VERBOSE,"  SecondLevelPagingEntry Base - 0x%x\n", SecondLevelPagingEntry));
+
+  if (BaseAddress != ALIGN_VALUE(BaseAddress, SIZE_4KB)) {
+    DEBUG ((DEBUG_ERROR, "SetSecondLevelPagingAttribute - Invalid Alignment\n"));
+    return EFI_UNSUPPORTED;
+  }
+  if (Length != ALIGN_VALUE(Length, SIZE_4KB)) {
+    DEBUG ((DEBUG_ERROR, "SetSecondLevelPagingAttribute - Invalid Alignment\n"));
+    return EFI_UNSUPPORTED;
+  }
+
+  while (Length != 0) {
+    PageEntry = GetSecondLevelPageTableEntry (VtdIndex, SecondLevelPagingEntry, BaseAddress, mVtdUnitInformation[VtdIndex].Is5LevelPaging, &PageAttribute);
+    if (PageEntry == NULL) {
+      DEBUG ((DEBUG_ERROR, "PageEntry - NULL\n"));
+      return RETURN_UNSUPPORTED;
+    }
+    PageEntryLength = PageAttributeToLength (PageAttribute);
+    SplitAttribute = NeedSplitPage (BaseAddress, Length, PageAttribute);
+    if (SplitAttribute == PageNone) {
+      ConvertSecondLevelPageEntryAttribute (VtdIndex, PageEntry, IoMmuAccess, &IsEntryModified);
+      if (IsEntryModified) {
+        mVtdUnitInformation[VtdIndex].HasDirtyPages = TRUE;
+      }
+      //
+      // Convert success, move to next
+      //
+      BaseAddress += PageEntryLength;
+      Length -= PageEntryLength;
+    } else {
+      Status = SplitSecondLevelPage (VtdIndex, PageEntry, PageAttribute, SplitAttribute);
+      if (RETURN_ERROR (Status)) {
+        DEBUG ((DEBUG_ERROR, "SplitSecondLevelPage - %r\n", Status));
+        return RETURN_UNSUPPORTED;
+      }
+      mVtdUnitInformation[VtdIndex].HasDirtyPages = TRUE;
+      //
+      // Just split current page
+      // Convert success in next around
+      //
+    }
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Set VTd attribute for a system memory.
+
+  Thin wrapper: delegates to SetSecondLevelPagingAttribute when a second
+  level page table exists, otherwise reports EFI_NOT_FOUND.
+
+  @param[in]  VtdIndex                The index used to identify a VTd engine.
+  @param[in]  DomainIdentifier        The domain ID of the source.
+  @param[in]  SecondLevelPagingEntry  The second level paging entry in VTd table for the device.
+  @param[in]  BaseAddress             The base of device memory address to be used as the DMA memory.
+  @param[in]  Length                  The length of device memory address to be used as the DMA memory.
+  @param[in]  IoMmuAccess             The IOMMU access.
+
+  @retval EFI_SUCCESS     The IoMmuAccess is set for the memory range specified by BaseAddress and Length.
+  @retval EFI_NOT_FOUND   SecondLevelPagingEntry is NULL.
+  @retval other           Error from SetSecondLevelPagingAttribute.
+**/
+EFI_STATUS
+SetPageAttribute (
+  IN UINTN                          VtdIndex,
+  IN UINT16                         DomainIdentifier,
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY  *SecondLevelPagingEntry,
+  IN UINT64                         BaseAddress,
+  IN UINT64                         Length,
+  IN UINT64                         IoMmuAccess
+  )
+{
+  if (SecondLevelPagingEntry == NULL) {
+    return EFI_NOT_FOUND;
+  }
+
+  return SetSecondLevelPagingAttribute (VtdIndex, DomainIdentifier, SecondLevelPagingEntry, BaseAddress, Length, IoMmuAccess);
+}
+
+/**
+  Set VTd attribute for a system memory.
+
+  Locates the VTd engine and (extended) context entry for the PCI source ID,
+  creates the device's second level page table on first use, applies
+  IoMmuAccess to [BaseAddress, BaseAddress + Length), and invalidates the
+  IOTLB if anything changed.
+
+  @param[in]  Segment           The Segment used to identify a VTd engine.
+  @param[in]  SourceId          The SourceId used to identify a VTd engine and table entry.
+  @param[in]  BaseAddress       The base of device memory address to be used as the DMA memory.
+  @param[in]  Length            The length of device memory address to be used as the DMA memory.
+  @param[in]  IoMmuAccess       The IOMMU access.
+
+  @retval EFI_SUCCESS            The IoMmuAccess is set for the memory range specified by BaseAddress and Length.
+  @retval EFI_DEVICE_ERROR       The PCI device is not found.
+  @retval EFI_OUT_OF_RESOURCES   There are not enough resources available to create the page table.
+  @retval EFI_UNSUPPORTED        The IOMMU does not support the memory range specified by BaseAddress and Length.
+**/
+EFI_STATUS
+SetAccessAttribute (
+  IN UINT16        Segment,
+  IN VTD_SOURCE_ID SourceId,
+  IN UINT64        BaseAddress,
+  IN UINT64        Length,
+  IN UINT64        IoMmuAccess
+  )
+{
+  UINTN                         VtdIndex;
+  EFI_STATUS                    Status;
+  VTD_EXT_CONTEXT_ENTRY         *ExtContextEntry;
+  VTD_CONTEXT_ENTRY             *ContextEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *SecondLevelPagingEntry;
+  UINT64                        Pt;
+  UINTN                         PciDataIndex;
+  UINT16                        DomainIdentifier;
+
+  SecondLevelPagingEntry = NULL;
+
+  DEBUG ((DEBUG_VERBOSE,"SetAccessAttribute (S%04x B%02x D%02x F%02x) (0x%016lx - 0x%08x, %x)\n", Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function, BaseAddress, (UINTN)Length, IoMmuAccess));
+
+  VtdIndex = FindVtdIndexByPciDevice (Segment, SourceId, &ExtContextEntry, &ContextEntry);
+  if (VtdIndex == (UINTN)-1) {
+    DEBUG ((DEBUG_ERROR,"SetAccessAttribute - Pci device (S%04x B%02x D%02x F%02x) not found!\n", Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+    return EFI_DEVICE_ERROR;
+  }
+
+  PciDataIndex = GetPciDataIndex (VtdIndex, Segment, SourceId);
+  mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceData[PciDataIndex].AccessCount++;
+  //
+  // DomainId should not be 0.
+  //
+  DomainIdentifier = (UINT16)(PciDataIndex + 1);
+
+  if (ExtContextEntry != NULL) {
+    if (ExtContextEntry->Bits.Present == 0) {
+      SecondLevelPagingEntry = CreateSecondLevelPagingEntry (VtdIndex, 0, mVtdUnitInformation[VtdIndex].Is5LevelPaging);
+      if (SecondLevelPagingEntry == NULL) {
+        //
+        // Never mark the context entry present with a NULL page table pointer.
+        //
+        DEBUG ((DEBUG_ERROR,"SetAccessAttribute - CreateSecondLevelPagingEntry failed!\n"));
+        return EFI_OUT_OF_RESOURCES;
+      }
+      DEBUG ((DEBUG_VERBOSE,"SecondLevelPagingEntry - 0x%x (S%04x B%02x D%02x F%02x) New\n", SecondLevelPagingEntry, Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+      Pt = (UINT64)RShiftU64 ((UINT64)(UINTN)SecondLevelPagingEntry, 12);
+
+      ExtContextEntry->Bits.SecondLevelPageTranslationPointerLo = (UINT32) Pt;
+      ExtContextEntry->Bits.SecondLevelPageTranslationPointerHi = (UINT32) RShiftU64(Pt, 20);
+      ExtContextEntry->Bits.DomainIdentifier = DomainIdentifier;
+      ExtContextEntry->Bits.Present = 1;
+      FlushPageTableMemory (VtdIndex, (UINTN)ExtContextEntry, sizeof(*ExtContextEntry));
+      VtdLibDumpDmarExtContextEntryTable (NULL, NULL, mVtdUnitInformation[VtdIndex].ExtRootEntryTable, mVtdUnitInformation[VtdIndex].Is5LevelPaging);
+      mVtdUnitInformation[VtdIndex].HasDirtyContext = TRUE;
+    } else {
+      SecondLevelPagingEntry = (VOID *)(UINTN)VTD_64BITS_ADDRESS(ExtContextEntry->Bits.SecondLevelPageTranslationPointerLo, ExtContextEntry->Bits.SecondLevelPageTranslationPointerHi);
+      DEBUG ((DEBUG_VERBOSE,"SecondLevelPagingEntry - 0x%x (S%04x B%02x D%02x F%02x)\n", SecondLevelPagingEntry, Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+    }
+  } else if (ContextEntry != NULL) {
+    if (ContextEntry->Bits.Present == 0) {
+      SecondLevelPagingEntry = CreateSecondLevelPagingEntry (VtdIndex, 0, mVtdUnitInformation[VtdIndex].Is5LevelPaging);
+      if (SecondLevelPagingEntry == NULL) {
+        //
+        // Never mark the context entry present with a NULL page table pointer.
+        //
+        DEBUG ((DEBUG_ERROR,"SetAccessAttribute - CreateSecondLevelPagingEntry failed!\n"));
+        return EFI_OUT_OF_RESOURCES;
+      }
+      DEBUG ((DEBUG_VERBOSE,"SecondLevelPagingEntry - 0x%x (S%04x B%02x D%02x F%02x) New\n", SecondLevelPagingEntry, Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+      Pt = (UINT64)RShiftU64 ((UINT64)(UINTN)SecondLevelPagingEntry, 12);
+
+      ContextEntry->Bits.SecondLevelPageTranslationPointerLo = (UINT32) Pt;
+      ContextEntry->Bits.SecondLevelPageTranslationPointerHi = (UINT32) RShiftU64(Pt, 20);
+      ContextEntry->Bits.DomainIdentifier = DomainIdentifier;
+      ContextEntry->Bits.Present = 1;
+      FlushPageTableMemory (VtdIndex, (UINTN)ContextEntry, sizeof(*ContextEntry));
+      VtdLibDumpDmarContextEntryTable (NULL, NULL, mVtdUnitInformation[VtdIndex].RootEntryTable, mVtdUnitInformation[VtdIndex].Is5LevelPaging);
+      mVtdUnitInformation[VtdIndex].HasDirtyContext = TRUE;
+    } else {
+      SecondLevelPagingEntry = (VOID *)(UINTN)VTD_64BITS_ADDRESS(ContextEntry->Bits.SecondLevelPageTranslationPointerLo, ContextEntry->Bits.SecondLevelPageTranslationPointerHi);
+      DEBUG ((DEBUG_VERBOSE,"SecondLevelPagingEntry - 0x%x (S%04x B%02x D%02x F%02x)\n", SecondLevelPagingEntry, Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+    }
+  }
+
+  //
+  // Do not update FixedSecondLevelPagingEntry
+  //
+  if (SecondLevelPagingEntry != mVtdUnitInformation[VtdIndex].FixedSecondLevelPagingEntry) {
+    Status = SetPageAttribute (
+               VtdIndex,
+               DomainIdentifier,
+               SecondLevelPagingEntry,
+               BaseAddress,
+               Length,
+               IoMmuAccess
+               );
+    if (EFI_ERROR (Status)) {
+      DEBUG ((DEBUG_ERROR,"SetPageAttribute - %r\n", Status));
+      return Status;
+    }
+  }
+
+  InvalidatePageEntry (VtdIndex);
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Always enable the VTd page attribute for the device.
+
+  Lazily creates the engine's shared "fixed" second level page table (R/W for
+  the whole mapped range) and points the device's (extended) context entry at
+  it, using the engine's widest domain ID so it cannot collide with the
+  per-device domain IDs assigned in SetAccessAttribute.
+
+  @param[in]  Segment           The Segment used to identify a VTd engine.
+  @param[in]  SourceId          The SourceId used to identify a VTd engine and table entry.
+
+  @retval EFI_SUCCESS            The VTd entry is updated to always enable all DMA access for the specific device.
+  @retval EFI_DEVICE_ERROR       The PCI device is not found.
+  @retval EFI_OUT_OF_RESOURCES   The fixed second level page table could not be created.
+**/
+EFI_STATUS
+AlwaysEnablePageAttribute (
+  IN UINT16        Segment,
+  IN VTD_SOURCE_ID SourceId
+  )
+{
+  UINTN                         VtdIndex;
+  VTD_EXT_CONTEXT_ENTRY         *ExtContextEntry;
+  VTD_CONTEXT_ENTRY             *ContextEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *SecondLevelPagingEntry;
+  UINT64                        Pt;
+
+  DEBUG ((DEBUG_INFO,"AlwaysEnablePageAttribute (S%04x B%02x D%02x F%02x)\n", Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+
+  VtdIndex = FindVtdIndexByPciDevice (Segment, SourceId, &ExtContextEntry, &ContextEntry);
+  if (VtdIndex == (UINTN)-1) {
+    DEBUG ((DEBUG_ERROR,"AlwaysEnablePageAttribute - Pci device (S%04x B%02x D%02x F%02x) not found!\n", Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+    return EFI_DEVICE_ERROR;
+  }
+
+  if (mVtdUnitInformation[VtdIndex].FixedSecondLevelPagingEntry == NULL) {
+    DEBUG((DEBUG_INFO, "CreateSecondLevelPagingEntry - %d\n", VtdIndex));
+    mVtdUnitInformation[VtdIndex].FixedSecondLevelPagingEntry = CreateSecondLevelPagingEntry (VtdIndex, EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE, mVtdUnitInformation[VtdIndex].Is5LevelPaging);
+    if (mVtdUnitInformation[VtdIndex].FixedSecondLevelPagingEntry == NULL) {
+      //
+      // Do not program a context entry with a NULL page table pointer.
+      //
+      DEBUG ((DEBUG_ERROR,"AlwaysEnablePageAttribute - failed to create the fixed page table!\n"));
+      return EFI_OUT_OF_RESOURCES;
+    }
+  }
+
+  SecondLevelPagingEntry = mVtdUnitInformation[VtdIndex].FixedSecondLevelPagingEntry;
+  Pt = (UINT64)RShiftU64 ((UINT64)(UINTN)SecondLevelPagingEntry, 12);
+  if (ExtContextEntry != NULL) {
+    ExtContextEntry->Bits.SecondLevelPageTranslationPointerLo = (UINT32) Pt;
+    ExtContextEntry->Bits.SecondLevelPageTranslationPointerHi = (UINT32) RShiftU64(Pt, 20);
+    //
+    // Use the maximum domain ID supported by the engine (CAP.ND encodes
+    // 2^(4 + 2*ND) domains).
+    //
+    ExtContextEntry->Bits.DomainIdentifier = ((1 << (UINT8)((UINTN)mVtdUnitInformation[VtdIndex].CapReg.Bits.ND * 2 + 4)) - 1);
+    ExtContextEntry->Bits.Present = 1;
+    FlushPageTableMemory (VtdIndex, (UINTN)ExtContextEntry, sizeof(*ExtContextEntry));
+  } else if (ContextEntry != NULL) {
+    ContextEntry->Bits.SecondLevelPageTranslationPointerLo = (UINT32) Pt;
+    ContextEntry->Bits.SecondLevelPageTranslationPointerHi = (UINT32) RShiftU64(Pt, 20);
+    ContextEntry->Bits.DomainIdentifier = ((1 << (UINT8)((UINTN)mVtdUnitInformation[VtdIndex].CapReg.Bits.ND * 2 + 4)) - 1);
+    ContextEntry->Bits.Present = 1;
+    FlushPageTableMemory (VtdIndex, (UINTN)ContextEntry, sizeof(*ContextEntry));
+  }
+
+  return EFI_SUCCESS;
+}
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/TranslationTableEx.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/TranslationTableEx.c
new file mode 100644
index 000000000..c07afaf2b
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/TranslationTableEx.c
@@ -0,0 +1,108 @@
+/** @file
+
+ Copyright (c) 2017 - 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "DmaProtection.h"
+
+/**
+  Create extended context entry.
+
+  Allocates one extended root entry table plus one (lower + upper) extended
+  context table page pair per PCI bus seen on this engine, then initializes a
+  non-present context entry for every known PCI device and selects 4- or
+  5-level paging from CAP.SAGAW and the DMAR host address width.
+
+  @param[in] VtdIndex  The index of the VTd engine.
+
+  @retval EFI_SUCCESS           The extended context entry is created.
+  @retval EFI_OUT_OF_RESOURCES  No enough resource to create extended context entry.
+  @retval EFI_UNSUPPORTED       CAP.SAGAW reports no supported page-table type.
+**/
+EFI_STATUS
+CreateExtContextEntry (
+  IN UINTN VtdIndex
+  )
+{
+  UINTN                 Index;
+  VOID                  *Buffer;
+  UINTN                 RootPages;
+  UINTN                 ContextPages;
+  VTD_EXT_ROOT_ENTRY    *ExtRootEntry;
+  VTD_EXT_CONTEXT_ENTRY *ExtContextEntryTable;
+  VTD_EXT_CONTEXT_ENTRY *ExtContextEntry;
+  VTD_SOURCE_ID         *PciSourceId;
+  VTD_SOURCE_ID         SourceId;
+  UINTN                 MaxBusNumber;
+  UINTN                 EntryTablePages;
+
+  //
+  // Size the allocation by the highest bus number in use: one context table
+  // pair is carved out per populated bus, lazily, as buses are encountered.
+  //
+  MaxBusNumber = 0;
+  for (Index = 0; Index < mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceDataNumber; Index++) {
+    PciSourceId = &mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceData[Index].PciSourceId;
+    if (PciSourceId->Bits.Bus > MaxBusNumber) {
+      MaxBusNumber = PciSourceId->Bits.Bus;
+    }
+  }
+  DEBUG ((DEBUG_INFO,"  MaxBusNumber - 0x%x\n", MaxBusNumber));
+
+  RootPages = EFI_SIZE_TO_PAGES (sizeof (VTD_EXT_ROOT_ENTRY) * VTD_ROOT_ENTRY_NUMBER);
+  ContextPages = EFI_SIZE_TO_PAGES (sizeof (VTD_EXT_CONTEXT_ENTRY) * VTD_CONTEXT_ENTRY_NUMBER);
+  EntryTablePages = RootPages + ContextPages * (MaxBusNumber + 1);
+  Buffer = AllocateZeroPages (EntryTablePages);
+  if (Buffer == NULL) {
+    DEBUG ((DEBUG_INFO,"Could not Alloc Root Entry Table.. \n"));
+    return EFI_OUT_OF_RESOURCES;
+  }
+  mVtdUnitInformation[VtdIndex].ExtRootEntryTable = (VTD_EXT_ROOT_ENTRY *)Buffer;
+  // Buffer now walks through the context-table region following the root table.
+  Buffer = (UINT8 *)Buffer + EFI_PAGES_TO_SIZE (RootPages);
+
+  for (Index = 0; Index < mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceDataNumber; Index++) {
+    PciSourceId = &mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceData[Index].PciSourceId;
+
+    SourceId.Bits.Bus = PciSourceId->Bits.Bus;
+    SourceId.Bits.Device = PciSourceId->Bits.Device;
+    SourceId.Bits.Function = PciSourceId->Bits.Function;
+
+    ExtRootEntry = &mVtdUnitInformation[VtdIndex].ExtRootEntryTable[SourceId.Index.RootIndex];
+    if (ExtRootEntry->Bits.LowerPresent == 0) {
+      //
+      // First device on this bus: carve out its lower+upper context tables.
+      // NOTE(review): the upper context table is assumed to be the 4KB page
+      // immediately after the lower one (the +1 is in 4KB-page units);
+      // verify the Hi computation for allocations above 4GB.
+      //
+      ExtRootEntry->Bits.LowerContextTablePointerLo  = (UINT32) RShiftU64 ((UINT64)(UINTN)Buffer, 12);
+      ExtRootEntry->Bits.LowerContextTablePointerHi  = (UINT32) RShiftU64 ((UINT64)(UINTN)Buffer, 32);
+      ExtRootEntry->Bits.LowerPresent = 1;
+      ExtRootEntry->Bits.UpperContextTablePointerLo  = (UINT32) RShiftU64 ((UINT64)(UINTN)Buffer, 12) + 1;
+      ExtRootEntry->Bits.UpperContextTablePointerHi  = (UINT32) RShiftU64 (RShiftU64 ((UINT64)(UINTN)Buffer, 12) + 1, 20);
+      ExtRootEntry->Bits.UpperPresent = 1;
+      Buffer = (UINT8 *)Buffer + EFI_PAGES_TO_SIZE (ContextPages);
+    }
+
+    ExtContextEntryTable = (VTD_EXT_CONTEXT_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(ExtRootEntry->Bits.LowerContextTablePointerLo, ExtRootEntry->Bits.LowerContextTablePointerHi) ;
+    ExtContextEntry = &ExtContextEntryTable[SourceId.Index.ContextIndex];
+    ExtContextEntry->Bits.TranslationType = 0;
+    ExtContextEntry->Bits.FaultProcessingDisable = 0;
+    // Left non-present here; SetAccessAttribute marks it present once a
+    // second level page table exists for the device.
+    ExtContextEntry->Bits.Present = 0;
+
+    DEBUG ((DEBUG_INFO,"DOMAIN: S%04x, B%02x D%02x F%02x\n", mVtdUnitInformation[VtdIndex].Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+
+    //
+    // Prefer 5-level paging (SAGAW bit 3), but fall back to 4-level when the
+    // platform address width fits in 48 bits and 4-level (bit 2) is available.
+    //
+    mVtdUnitInformation[VtdIndex].Is5LevelPaging = FALSE;
+    if ((mVtdUnitInformation[VtdIndex].CapReg.Bits.SAGAW & BIT3) != 0) {
+      mVtdUnitInformation[VtdIndex].Is5LevelPaging = TRUE;
+      if ((mAcpiDmarTable->HostAddressWidth <= 48) &&
+          ((mVtdUnitInformation[VtdIndex].CapReg.Bits.SAGAW & BIT2) != 0)) {
+        mVtdUnitInformation[VtdIndex].Is5LevelPaging = FALSE;
+      }
+    } else if ((mVtdUnitInformation[VtdIndex].CapReg.Bits.SAGAW & BIT2) == 0) {
+      DEBUG((DEBUG_ERROR, "!!!! Page-table type is not supported on VTD %d !!!!\n", VtdIndex));
+      return EFI_UNSUPPORTED;
+    }
+
+    if (mVtdUnitInformation[VtdIndex].Is5LevelPaging) {
+      ExtContextEntry->Bits.AddressWidth = 0x3;
+      DEBUG((DEBUG_INFO, "Using 5-level page-table on VTD %d\n", VtdIndex));
+    } else {
+      ExtContextEntry->Bits.AddressWidth = 0x2;
+      DEBUG((DEBUG_INFO, "Using 4-level page-table on VTD %d\n", VtdIndex));
+    }
+
+
+  }
+
+  FlushPageTableMemory (VtdIndex, (UINTN)mVtdUnitInformation[VtdIndex].ExtRootEntryTable, EFI_PAGES_TO_SIZE(EntryTablePages));
+
+  return EFI_SUCCESS;
+}
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/VtdReg.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/VtdReg.c
new file mode 100644
index 000000000..56d621ff6
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/VtdReg.c
@@ -0,0 +1,759 @@
+/** @file
+
+ Copyright (c) 2017 - 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "DmaProtection.h"
+
+//
+// Upper bound on the number of Fault Recording Registers: CAP_REG.NFR is an
+// 8-bit "N - 1" encoded field, so at most 256 registers exist.
+//
+#define VTD_CAP_REG_NFR_MAX (256)
+
+// Number of VTd engines described by the DMAR table (set during driver init).
+UINTN mVtdUnitNumber = 0;
+// Per-engine state array, indexed by VtdIndex; mVtdUnitNumber valid entries.
+VTD_UNIT_INFORMATION *mVtdUnitInformation = NULL;
+// Scratch buffer reused by DumpVtdRegs () for register snapshots and logging.
+VTD_REGESTER_INFO *mVtdRegsInfoBuffer = NULL;
+
+// TRUE once EnableDmar () has completed; cleared again by DisableDmar ().
+BOOLEAN mVtdEnabled;
+
+/**
+  Flush VTd page table and context table memory.
+
+  This action is to make sure the IOMMU engine can get final data in memory.
+
+  @param[in]  VtdIndex          The index used to identify a VTd engine.
+  @param[in]  Base              The base address of memory to be flushed.
+  @param[in]  Size              The size of memory in bytes to be flushed.
+**/
+VOID
+FlushPageTableMemory (
+  IN UINTN                      VtdIndex,
+  IN UINTN                      Base,
+  IN UINTN                      Size
+  )
+{
+  //
+  // When the engine reports page-walk coherency (ECAP.C set), hardware
+  // snoops the CPU caches and no explicit write-back is required.
+  //
+  if (mVtdUnitInformation[VtdIndex].ECapReg.Bits.C != 0) {
+    return;
+  }
+
+  WriteBackDataCacheRange ((VOID *) Base, Size);
+}
+
+/**
+  Prepare the cache invalidation interface.
+
+  (Note: the function name keeps the historical "Perpare" spelling; it cannot
+  be renamed without touching its callers.)
+
+  Engines with major version <= 5 use Register-based Invalidation. Newer
+  engines must support Queued Invalidation (QI): this routine allocates the
+  descriptor queue, programs IQA, and sets QIE in the Global Command Register.
+
+  @param[in]  VtdIndex    The index used to identify a VTd engine.
+
+  @retval EFI_SUCCESS           The operation was successful.
+  @retval EFI_UNSUPPORTED       Invalidation method is not supported.
+  @retval EFI_OUT_OF_RESOURCES  A memory allocation failed.
+**/
+EFI_STATUS
+PerpareCacheInvalidationInterface (
+  IN UINTN VtdIndex
+  )
+{
+  UINT32 Reg32;
+  VTD_IQA_REG IqaReg;
+  VTD_UNIT_INFORMATION *VtdUnitInfo;
+  UINTN VtdUnitBaseAddress;
+
+  VtdUnitInfo = &mVtdUnitInformation[VtdIndex];
+  VtdUnitBaseAddress = VtdUnitInfo->VtdUnitBaseAddress;
+
+  //
+  // Older architecture versions fall back to register-based invalidation.
+  //
+  if (VtdUnitInfo->VerReg.Bits.Major <= 5) {
+    VtdUnitInfo->EnableQueuedInvalidation = 0;
+    DEBUG ((DEBUG_INFO, "Use Register-based Invalidation Interface for engine [%d]\n", VtdIndex));
+    return EFI_SUCCESS;
+  }
+
+  if (VtdUnitInfo->ECapReg.Bits.QI == 0) {
+    DEBUG ((DEBUG_ERROR, "Hardware does not support queued invalidations interface for engine [%d]\n", VtdIndex));
+    return EFI_UNSUPPORTED;
+  }
+
+  VtdUnitInfo->EnableQueuedInvalidation = 1;
+  DEBUG ((DEBUG_INFO, "Use Queued Invalidation Interface for engine [%d]\n", VtdIndex));
+
+  //
+  // If QI is already active (e.g. left enabled by earlier firmware), disable
+  // it first and poll GSTS until hardware confirms, before reprogramming.
+  //
+  Reg32 = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  if ((Reg32 & B_GSTS_REG_QIES) != 0) {
+    DEBUG ((DEBUG_ERROR,"Queued Invalidation Interface was enabled.\n"));
+    Reg32 &= (~B_GSTS_REG_QIES);
+    MmioWrite32 (VtdUnitBaseAddress + R_GCMD_REG, Reg32);
+    do {
+      Reg32 = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+    } while ((Reg32 & B_GSTS_REG_QIES) != 0);
+  }
+
+  //
+  // Initialize the Invalidation Queue Tail Register to zero.
+  //
+  MmioWrite64 (VtdUnitBaseAddress + R_IQT_REG, 0);
+
+  //
+  // Setup the IQ address, size and descriptor width through the Invalidation Queue Address Register
+  //
+  if (VtdUnitInfo->QiDescBuffer == NULL) {
+    // Queue holds 2^(QS + 7) descriptors of 256 bits each.
+    VtdUnitInfo->QiDescBufferSize = (sizeof (QI_256_DESC) * ((UINTN) 1 << (VTD_INVALIDATION_QUEUE_SIZE + 7)));
+    VtdUnitInfo->QiDescBuffer = AllocatePages (EFI_SIZE_TO_PAGES (VtdUnitInfo->QiDescBufferSize));
+    if (VtdUnitInfo->QiDescBuffer == NULL) {
+      DEBUG ((DEBUG_ERROR,"Could not Alloc Invalidation Queue Buffer.\n"));
+      VTdLogAddEvent (VTDLOG_DXE_QUEUED_INVALIDATION, VTD_LOG_QI_ERROR_OUT_OF_RESOURCES, VtdUnitBaseAddress);
+      return EFI_OUT_OF_RESOURCES;
+    }
+  }
+
+  DEBUG ((DEBUG_INFO, "Invalidation Queue Buffer Size : %d\n", VtdUnitInfo->QiDescBufferSize));
+  //
+  // 4KB Aligned address
+  //
+  IqaReg.Uint64 = (UINT64) (UINTN) VtdUnitInfo->QiDescBuffer;
+  IqaReg.Bits.DW = VTD_QUEUED_INVALIDATION_DESCRIPTOR_WIDTH;
+  IqaReg.Bits.QS = VTD_INVALIDATION_QUEUE_SIZE;
+  MmioWrite64 (VtdUnitBaseAddress + R_IQA_REG, IqaReg.Uint64);
+  IqaReg.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_IQA_REG);
+  DEBUG ((DEBUG_INFO, "IQA_REG = 0x%lx, IQH_REG = 0x%lx\n", IqaReg.Uint64, MmioRead64 (VtdUnitBaseAddress + R_IQH_REG)));
+
+  //
+  // Enable the queued invalidation interface through the Global Command Register.
+  // When enabled, hardware sets the QIES field in the Global Status Register.
+  //
+  Reg32 = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  Reg32 |= B_GMCD_REG_QIE;
+  MmioWrite32 (VtdUnitBaseAddress + R_GCMD_REG, Reg32);
+  DEBUG ((DEBUG_INFO, "Enable Queued Invalidation Interface. GCMD_REG = 0x%x\n", Reg32));
+  do {
+    Reg32 = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  } while ((Reg32 & B_GSTS_REG_QIES) == 0);
+
+  VTdLogAddEvent (VTDLOG_DXE_QUEUED_INVALIDATION, VTD_LOG_QI_ENABLE, VtdUnitBaseAddress);
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Submit the queued invalidation descriptor to the remapping
+  hardware unit and wait for its completion.
+
+  On a device error, the fault status (FSTS) and the invalidation error
+  record (IQERCD) are captured into the VTd log, then the QI-related fault
+  bits are cleared so subsequent submissions can report fresh faults.
+
+  @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+  @param[in] Desc               The invalidate descriptor
+
+  @retval EFI_SUCCESS           The operation was successful.
+  @retval RETURN_DEVICE_ERROR   A fault is detected.
+  @retval EFI_INVALID_PARAMETER Parameter is invalid.
+**/
+EFI_STATUS
+SubmitQueuedInvalidationDescriptor (
+  IN UINTN             VtdUnitBaseAddress,
+  IN QI_256_DESC       *Desc
+  )
+{
+  EFI_STATUS           Status;
+  VTD_REGESTER_QI_INFO RegisterQi;
+
+  Status = VtdLibSubmitQueuedInvalidationDescriptor (VtdUnitBaseAddress, Desc, FALSE);
+  if (Status == EFI_DEVICE_ERROR) {
+    //
+    // Snapshot the fault registers for the log before clearing them.
+    // (Fixed a stray double semicolon on the FSTS read.)
+    //
+    RegisterQi.BaseAddress = VtdUnitBaseAddress;
+    RegisterQi.FstsReg     = MmioRead32 (VtdUnitBaseAddress + R_FSTS_REG);
+    RegisterQi.IqercdReg   = MmioRead64 (VtdUnitBaseAddress + R_IQERCD_REG);
+    VTdLogAddDataEvent (VTDLOG_PEI_REGISTER, VTDLOG_REGISTER_QI, &RegisterQi, sizeof (VTD_REGESTER_QI_INFO));
+
+    //
+    // Write-one-to-clear the invalidation queue/timeout/completion error bits.
+    //
+    MmioWrite32 (VtdUnitBaseAddress + R_FSTS_REG, RegisterQi.FstsReg & (B_FSTS_REG_IQE | B_FSTS_REG_ITE | B_FSTS_REG_ICE));
+  }
+
+  return Status;
+}
+
+/**
+  Invalidate VTd context cache.
+
+  Uses a global context-cache invalidation, either through the CCMD register
+  (register-based mode) or through a QI context-cache descriptor.
+
+  @param[in]  VtdIndex          The index used to identify a VTd engine.
+
+  @retval EFI_SUCCESS           The context cache was invalidated.
+  @retval EFI_DEVICE_ERROR      A previous invalidation is still in progress,
+                                or the queued descriptor faulted.
+**/
+EFI_STATUS
+InvalidateContextCache (
+  IN UINTN             VtdIndex
+  )
+{
+  UINT64               Reg64;
+  QI_256_DESC          QiDesc;
+
+  if (mVtdUnitInformation[VtdIndex].EnableQueuedInvalidation == 0) {
+    //
+    // Register-based Invalidation
+    //
+    Reg64 = MmioRead64 (mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress + R_CCMD_REG);
+    if ((Reg64 & B_CCMD_REG_ICC) != 0) {
+      // ICC still set means a prior invalidation request has not completed.
+      DEBUG ((DEBUG_ERROR,"ERROR: InvalidateContextCache: B_CCMD_REG_ICC is set for VTD(%d)\n",VtdIndex));
+      return EFI_DEVICE_ERROR;
+    }
+
+    // Request a global context-cache invalidation and poll until ICC clears.
+    Reg64 &= ((~B_CCMD_REG_ICC) & (~B_CCMD_REG_CIRG_MASK));
+    Reg64 |= (B_CCMD_REG_ICC | V_CCMD_REG_CIRG_GLOBAL);
+    MmioWrite64 (mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress + R_CCMD_REG, Reg64);
+
+    do {
+      Reg64 = MmioRead64 (mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress + R_CCMD_REG);
+    } while ((Reg64 & B_CCMD_REG_ICC) != 0);
+  } else {
+    //
+    // Queued Invalidation
+    //
+    // Granularity 1 = global invalidation; FM/SID/DID are don't-care here.
+    QiDesc.Uint64[0] = QI_CC_FM(0) | QI_CC_SID(0) | QI_CC_DID(0) | QI_CC_GRAN(1) | QI_CC_TYPE;
+    QiDesc.Uint64[1] = 0;
+    QiDesc.Uint64[2] = 0;
+    QiDesc.Uint64[3] = 0;
+
+    return SubmitQueuedInvalidationDescriptor(mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress, &QiDesc);
+  }
+  return EFI_SUCCESS;
+}
+
+/**
+  Invalidate VTd IOTLB.
+
+  Performs a global IOTLB invalidation, either through the IOTLB register
+  (located at ECAP.IRO * 16 from the base) or through a QI IOTLB descriptor.
+
+  @param[in]  VtdIndex          The index used to identify a VTd engine.
+
+  @retval EFI_SUCCESS           The IOTLB was invalidated.
+  @retval EFI_DEVICE_ERROR      A previous invalidation is still in progress,
+                                or the queued descriptor faulted.
+**/
+EFI_STATUS
+InvalidateIOTLB (
+  IN UINTN             VtdIndex
+  )
+{
+  UINT64               Reg64;
+  QI_256_DESC          QiDesc;
+
+  if (mVtdUnitInformation[VtdIndex].EnableQueuedInvalidation == 0) {
+    //
+    // Register-based Invalidation
+    //
+    Reg64 = MmioRead64 (mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress + (mVtdUnitInformation[VtdIndex].ECapReg.Bits.IRO * 16) + R_IOTLB_REG);
+    if ((Reg64 & B_IOTLB_REG_IVT) != 0) {
+      // IVT still set means a prior invalidation request has not completed.
+      DEBUG ((DEBUG_ERROR,"ERROR: InvalidateIOTLB: B_IOTLB_REG_IVT is set for VTD(%d)\n", VtdIndex));
+      return EFI_DEVICE_ERROR;
+    }
+
+    // Request a global IOTLB invalidation and poll until IVT clears.
+    Reg64 &= ((~B_IOTLB_REG_IVT) & (~B_IOTLB_REG_IIRG_MASK));
+    Reg64 |= (B_IOTLB_REG_IVT | V_IOTLB_REG_IIRG_GLOBAL);
+    MmioWrite64 (mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress + (mVtdUnitInformation[VtdIndex].ECapReg.Bits.IRO * 16) + R_IOTLB_REG, Reg64);
+
+    do {
+      Reg64 = MmioRead64 (mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress + (mVtdUnitInformation[VtdIndex].ECapReg.Bits.IRO * 16) + R_IOTLB_REG);
+    } while ((Reg64 & B_IOTLB_REG_IVT) != 0);
+  } else {
+    //
+    // Queued Invalidation
+    //
+    // Granularity 1 = global; drain reads/writes per the CAP.DRD/DWD bits.
+    QiDesc.Uint64[0] = QI_IOTLB_DID(0) | QI_IOTLB_DR(CAP_READ_DRAIN(mVtdUnitInformation[VtdIndex].CapReg.Uint64)) | QI_IOTLB_DW(CAP_WRITE_DRAIN(mVtdUnitInformation[VtdIndex].CapReg.Uint64)) | QI_IOTLB_GRAN(1) | QI_IOTLB_TYPE;
+    QiDesc.Uint64[1] = QI_IOTLB_ADDR(0) | QI_IOTLB_IH(0) | QI_IOTLB_AM(0);
+    QiDesc.Uint64[2] = 0;
+    QiDesc.Uint64[3] = 0;
+
+    return SubmitQueuedInvalidationDescriptor(mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress, &QiDesc);
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Invalid VTd global IOTLB.
+
+  Flushes the write buffer, then invalidates the context cache and/or IOTLB
+  depending on which structures were modified since the last invalidation.
+
+  @param[in]  VtdIndex          The index of VTd engine.
+
+  @retval EFI_SUCCESS           VTd global IOTLB is invalidated.
+  @retval EFI_DEVICE_ERROR      VTd global IOTLB is not invalidated.
+**/
+EFI_STATUS
+InvalidateVtdIOTLBGlobal (
+  IN UINTN             VtdIndex
+  )
+{
+  VTD_UNIT_INFORMATION *VtdUnitInfo;
+
+  if (!mVtdEnabled) {
+    return EFI_SUCCESS;
+  }
+
+  DEBUG ((DEBUG_VERBOSE, "InvalidateVtdIOTLBGlobal(%d)\n", VtdIndex));
+
+  VtdUnitInfo = &mVtdUnitInformation[VtdIndex];
+
+  //
+  // Flush the write buffer before issuing any invalidation request.
+  //
+  VtdLibFlushWriteBuffer (VtdUnitInfo->VtdUnitBaseAddress);
+
+  //
+  // Context-cache entries are only stale when a context entry was modified.
+  //
+  if (VtdUnitInfo->HasDirtyContext) {
+    InvalidateContextCache (VtdIndex);
+  }
+
+  //
+  // IOTLB entries are stale when either contexts or page tables changed.
+  //
+  if (VtdUnitInfo->HasDirtyContext || VtdUnitInfo->HasDirtyPages) {
+    InvalidateIOTLB (VtdIndex);
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Prepare VTD configuration.
+
+  Reads and dumps each engine's version/capability/extended-capability
+  registers, validates that a supported page-table type is present and that
+  the domain number can cover all PCI devices, then prepares the cache
+  invalidation interface for each engine. Returns early (leaving remaining
+  engines unconfigured) on any validation failure.
+**/
+VOID
+PrepareVtdConfig (
+  VOID
+  )
+{
+  UINTN Index;
+  UINTN DomainNumber;
+  EFI_STATUS Status;
+
+  if (mVtdRegsInfoBuffer == NULL) {
+    //
+    // One shared snapshot buffer, sized for the maximum possible number of
+    // fault recording registers.
+    //
+    mVtdRegsInfoBuffer = AllocateZeroPool (sizeof (VTD_REGESTER_INFO) + sizeof (VTD_UINT128) * VTD_CAP_REG_NFR_MAX);
+    ASSERT (mVtdRegsInfoBuffer != NULL);
+  }
+
+  //
+  // Dump VTd error before DXE phase
+  //
+  DumpVtdIfError ();
+
+  for (Index = 0; Index < mVtdUnitNumber; Index++) {
+    DEBUG ((DEBUG_INFO, "Dump VTd Capability (%d)\n", Index));
+    mVtdUnitInformation[Index].VerReg.Uint32 = MmioRead32 (mVtdUnitInformation[Index].VtdUnitBaseAddress + R_VER_REG);
+    DumpVtdVerRegs (&mVtdUnitInformation[Index].VerReg);
+    mVtdUnitInformation[Index].CapReg.Uint64 = MmioRead64 (mVtdUnitInformation[Index].VtdUnitBaseAddress + R_CAP_REG);
+    DumpVtdCapRegs (&mVtdUnitInformation[Index].CapReg);
+    mVtdUnitInformation[Index].ECapReg.Uint64 = MmioRead64 (mVtdUnitInformation[Index].VtdUnitBaseAddress + R_ECAP_REG);
+    DumpVtdECapRegs (&mVtdUnitInformation[Index].ECapReg);
+
+    if ((mVtdUnitInformation[Index].CapReg.Bits.SLLPS & BIT0) == 0) {
+      DEBUG((DEBUG_WARN, "!!!! 2MB super page is not supported on VTD %d !!!!\n", Index));
+    }
+    if ((mVtdUnitInformation[Index].CapReg.Bits.SAGAW & BIT3) != 0) {
+      DEBUG((DEBUG_INFO, "Support 5-level page-table on VTD %d\n", Index));
+    }
+    if ((mVtdUnitInformation[Index].CapReg.Bits.SAGAW & BIT2) != 0) {
+      DEBUG((DEBUG_INFO, "Support 4-level page-table on VTD %d\n", Index));
+    }
+    if ((mVtdUnitInformation[Index].CapReg.Bits.SAGAW & (BIT3 | BIT2)) == 0) {
+      //
+      // BUGFIX: the format string expects the SAGAW value first and the
+      // engine index second; the two arguments were previously swapped.
+      //
+      DEBUG((DEBUG_ERROR, "!!!! Page-table type 0x%X is not supported on VTD %d !!!!\n", mVtdUnitInformation[Index].CapReg.Bits.SAGAW, Index));
+      return ;
+    }
+
+    //
+    // Number of supported domains is 2^(4 + 2 * ND).
+    //
+    DomainNumber = (UINTN)1 << (UINT8)((UINTN)mVtdUnitInformation[Index].CapReg.Bits.ND * 2 + 4);
+    if (mVtdUnitInformation[Index].PciDeviceInfo->PciDeviceDataNumber >= DomainNumber) {
+      DEBUG((DEBUG_ERROR, "!!!! Pci device Number(0x%x) >= DomainNumber(0x%x) !!!!\n", mVtdUnitInformation[Index].PciDeviceInfo->PciDeviceDataNumber, DomainNumber));
+      return ;
+    }
+
+    Status = PerpareCacheInvalidationInterface(Index);
+    if (EFI_ERROR (Status)) {
+      ASSERT(FALSE);
+      return;
+    }
+  }
+  return ;
+}
+
+/**
+  Disable PMR in all VTd engine.
+
+  The per-engine result is recorded in the VTd event log.
+**/
+VOID
+DisablePmr (
+  VOID
+  )
+{
+  UINTN       EngineIndex;
+  EFI_STATUS  PmrStatus;
+
+  DEBUG ((DEBUG_INFO,"DisablePmr\n"));
+
+  for (EngineIndex = 0; EngineIndex < mVtdUnitNumber; EngineIndex++) {
+    PmrStatus = VtdLibDisablePmr (mVtdUnitInformation[EngineIndex].VtdUnitBaseAddress);
+    VTdLogAddEvent (VTDLOG_DXE_DISABLE_PMR, mVtdUnitInformation[EngineIndex].VtdUnitBaseAddress, PmrStatus);
+  }
+}
+
+/**
+  Update Root Table Address Register
+
+  Programs RTADDR with either the extended or the legacy root entry table,
+  optionally selecting Abort DMA Mode via the TTM field.
+
+  @param[in]  VtdIndex          The index used to identify a VTd engine.
+  @param[in]  EnableADM         TRUE - Enable ADM in TTM bits
+**/
+VOID
+UpdateRootTableAddressRegister (
+  IN UINTN                      VtdIndex,
+  IN BOOLEAN                    EnableADM
+  )
+{
+  UINT64 Reg64;
+
+  if (mVtdUnitInformation[VtdIndex].ExtRootEntryTable != NULL) {
+    DEBUG((DEBUG_INFO, "ExtRootEntryTable 0x%x \n", mVtdUnitInformation[VtdIndex].ExtRootEntryTable));
+    // NOTE(review): BIT11 appears to select the extended root-table mode in
+    // RTADDR (TTM/RTT encoding) — confirm against the VT-d register header.
+    Reg64 = (UINT64)(UINTN)mVtdUnitInformation[VtdIndex].ExtRootEntryTable | (EnableADM ? V_RTADDR_REG_TTM_ADM : BIT11);
+  } else {
+    DEBUG((DEBUG_INFO, "RootEntryTable 0x%x \n", mVtdUnitInformation[VtdIndex].RootEntryTable));
+    Reg64 = (UINT64)(UINTN)mVtdUnitInformation[VtdIndex].RootEntryTable | (EnableADM ? V_RTADDR_REG_TTM_ADM : 0);
+  }
+  MmioWrite64 (mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress + R_RTADDR_REG, Reg64);
+}
+
+/**
+  Enable DMAR translation.
+
+  For each engine: programs the root table (optionally transitioning through
+  Abort DMA Mode when supported and enabled via PCD), flushes the write
+  buffer, invalidates the context cache and IOTLB, then sets TE. Finally
+  disables PMR, which is superseded by the translation tables.
+
+  @retval EFI_SUCCESS           DMAR translation is enabled.
+  @retval EFI_DEVICE_ERROR      DMAR translation is not enabled.
+**/
+EFI_STATUS
+EnableDmar (
+  VOID
+  )
+{
+  UINTN     Index;
+  UINTN     VtdUnitBaseAddress;
+  BOOLEAN   TEWasEnabled;
+
+  for (Index = 0; Index < mVtdUnitNumber; Index++) {
+    VtdUnitBaseAddress = mVtdUnitInformation[Index].VtdUnitBaseAddress;
+    DEBUG((DEBUG_INFO, ">>>>>>EnableDmar() for engine [%d] BAR [0x%x]\n", Index, VtdUnitBaseAddress));
+
+    //
+    // Check TE was enabled or not.
+    //
+    TEWasEnabled = ((MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG) & B_GSTS_REG_TE) == B_GSTS_REG_TE);
+
+    if (TEWasEnabled && (mVtdUnitInformation[Index].ECapReg.Bits.ADMS == 1) && PcdGetBool (PcdVTdSupportAbortDmaMode)) {
+      //
+      // Translation was already on and Abort DMA Mode is available: park the
+      // engine in ADM while the real root table is being prepared.
+      //
+      // For implementations reporting Enhanced SRTP Support (ESRTPS) field as
+      // Clear in the Capability register, software must not modify this field while
+      // DMA remapping is active (TES=1 in Global Status register).
+      //
+      if (mVtdUnitInformation[Index].CapReg.Bits.ESRTPS == 0) {
+        VtdLibClearGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_TE);
+      }
+
+      //
+      // Enable ADM
+      //
+      UpdateRootTableAddressRegister (Index, TRUE);
+
+      DEBUG((DEBUG_INFO, "EnableDmar: waiting for RTPS bit to be set... \n"));
+      VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_SRTP);
+
+      DEBUG((DEBUG_INFO, "Enable Abort DMA Mode...\n"));
+      VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_TE);
+
+    } else {
+      UpdateRootTableAddressRegister (Index, FALSE);
+
+      DEBUG((DEBUG_INFO, "EnableDmar: waiting for RTPS bit to be set... \n"));
+      VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_SRTP);
+    }
+
+    //
+    // Write Buffer Flush before invalidation
+    //
+    VtdLibFlushWriteBuffer (VtdUnitBaseAddress);
+
+    //
+    // Invalidate the context cache
+    //
+    InvalidateContextCache (Index);
+
+    //
+    // Invalidate the IOTLB cache
+    //
+    InvalidateIOTLB (Index);
+
+    if (TEWasEnabled && (mVtdUnitInformation[Index].ECapReg.Bits.ADMS == 1) && PcdGetBool (PcdVTdSupportAbortDmaMode)) {
+      //
+      // Leave ADM: point RTADDR at the real root table and latch it (SRTP).
+      //
+      if (mVtdUnitInformation[Index].CapReg.Bits.ESRTPS == 0) {
+        VtdLibClearGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_TE);
+      }
+
+      UpdateRootTableAddressRegister (Index, FALSE);
+
+      DEBUG((DEBUG_INFO, "EnableDmar: waiting for RTPS bit to be set... \n"));
+      VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_SRTP);
+    }
+
+    //
+    // Enable VTd
+    //
+    DEBUG ((DEBUG_INFO, "EnableDmar: Waiting B_GSTS_REG_TE ...\n"));
+    VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_TE);
+
+    DEBUG ((DEBUG_INFO,"VTD (%d) enabled!<<<<<<\n",Index));
+
+    VTdLogAddEvent (VTDLOG_DXE_ENABLE_DMAR, mVtdUnitInformation[Index].VtdUnitBaseAddress, 0);
+  }
+
+  //
+  // Need disable PMR, since we already setup translation table.
+  //
+  DisablePmr ();
+
+  mVtdEnabled = TRUE;
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Disable DMAR translation.
+
+  Disables translation on every engine, tears down the queued invalidation
+  interface (freeing its descriptor queue), then dumps the per-device DMA
+  access counters for diagnostics.
+
+  @retval EFI_SUCCESS           DMAR translation is disabled.
+  @retval EFI_DEVICE_ERROR      DMAR translation is not disabled.
+**/
+EFI_STATUS
+DisableDmar (
+  VOID
+  )
+{
+  UINTN                 Index;
+  UINTN                 SubIndex;
+  VTD_UNIT_INFORMATION  *VtdUnitInfo;
+
+  for (Index = 0; Index < mVtdUnitNumber; Index++) {
+    VtdUnitInfo = &mVtdUnitInformation[Index];
+
+    VtdLibDisableDmar (VtdUnitInfo->VtdUnitBaseAddress);
+    VTdLogAddEvent (VTDLOG_DXE_DISABLE_DMAR, VtdUnitInfo->VtdUnitBaseAddress, 0);
+
+    if (VtdUnitInfo->EnableQueuedInvalidation != 0) {
+      //
+      // Disable queued invalidation interface.
+      //
+      VtdLibDisableQueuedInvalidationInterface (VtdUnitInfo->VtdUnitBaseAddress);
+      VTdLogAddEvent (VTDLOG_DXE_QUEUED_INVALIDATION, VTD_LOG_QI_DISABLE, VtdUnitInfo->VtdUnitBaseAddress);
+
+      //
+      // Free descriptor queue memory
+      //
+      if (VtdUnitInfo->QiDescBuffer != NULL) {
+        FreePages(VtdUnitInfo->QiDescBuffer, EFI_SIZE_TO_PAGES (VtdUnitInfo->QiDescBufferSize));
+        VtdUnitInfo->QiDescBuffer = NULL;
+        VtdUnitInfo->QiDescBufferSize = 0;
+      }
+
+      VtdUnitInfo->EnableQueuedInvalidation = 0;
+    }
+  }
+
+  mVtdEnabled = FALSE;
+
+  for (Index = 0; Index < mVtdUnitNumber; Index++) {
+    VtdUnitInfo = &mVtdUnitInformation[Index];
+    DEBUG((DEBUG_INFO, "engine [%d] access\n", Index));
+    for (SubIndex = 0; SubIndex < VtdUnitInfo->PciDeviceInfo->PciDeviceDataNumber; SubIndex++) {
+      //
+      // BUGFIX: the per-device fields were previously indexed with the outer
+      // loop variable (Index), printing the same device for every iteration;
+      // use the inner loop variable (SubIndex).
+      //
+      DEBUG ((DEBUG_INFO, "  PCI S%04X B%02x D%02x F%02x - %d\n",
+        VtdUnitInfo->Segment,
+        VtdUnitInfo->PciDeviceInfo->PciDeviceData[SubIndex].PciSourceId.Bits.Bus,
+        VtdUnitInfo->PciDeviceInfo->PciDeviceData[SubIndex].PciSourceId.Bits.Device,
+        VtdUnitInfo->PciDeviceInfo->PciDeviceData[SubIndex].PciSourceId.Bits.Function,
+        VtdUnitInfo->PciDeviceInfo->PciDeviceData[SubIndex].AccessCount
+        ));
+    }
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Dump VTd version registers.
+
+  Prints the raw VER_REG value and its Major/Minor fields to the debug log.
+
+  @param[in]  VerReg            The version register.
+**/
+VOID
+DumpVtdVerRegs (
+  IN VTD_VER_REG                *VerReg
+  )
+{
+  DEBUG ((DEBUG_INFO, "  VerReg - 0x%x\n", VerReg->Uint32));
+  DEBUG ((DEBUG_INFO, "    Major - 0x%x\n", VerReg->Bits.Major));
+  DEBUG ((DEBUG_INFO, "    Minor - 0x%x\n", VerReg->Bits.Minor));
+}
+
+/**
+  Dump VTd capability registers.
+
+  Prints the raw CAP_REG value followed by each bit-field to the debug log.
+
+  @param[in]  CapReg            The capability register.
+**/
+VOID
+DumpVtdCapRegs (
+  IN VTD_CAP_REG *CapReg
+  )
+{
+  DEBUG((DEBUG_INFO, "  CapReg   - 0x%x\n", CapReg->Uint64));
+  DEBUG((DEBUG_INFO, "    ND     - 0x%x\n", CapReg->Bits.ND));
+  DEBUG((DEBUG_INFO, "    AFL    - 0x%x\n", CapReg->Bits.AFL));
+  DEBUG((DEBUG_INFO, "    RWBF   - 0x%x\n", CapReg->Bits.RWBF));
+  DEBUG((DEBUG_INFO, "    PLMR   - 0x%x\n", CapReg->Bits.PLMR));
+  DEBUG((DEBUG_INFO, "    PHMR   - 0x%x\n", CapReg->Bits.PHMR));
+  DEBUG((DEBUG_INFO, "    CM     - 0x%x\n", CapReg->Bits.CM));
+  DEBUG((DEBUG_INFO, "    SAGAW  - 0x%x\n", CapReg->Bits.SAGAW));
+  DEBUG((DEBUG_INFO, "    MGAW   - 0x%x\n", CapReg->Bits.MGAW));
+  DEBUG((DEBUG_INFO, "    ZLR    - 0x%x\n", CapReg->Bits.ZLR));
+  DEBUG((DEBUG_INFO, "    FRO    - 0x%x\n", CapReg->Bits.FRO));
+  DEBUG((DEBUG_INFO, "    SLLPS  - 0x%x\n", CapReg->Bits.SLLPS));
+  DEBUG((DEBUG_INFO, "    PSI    - 0x%x\n", CapReg->Bits.PSI));
+  DEBUG((DEBUG_INFO, "    NFR    - 0x%x\n", CapReg->Bits.NFR));
+  DEBUG((DEBUG_INFO, "    MAMV   - 0x%x\n", CapReg->Bits.MAMV));
+  DEBUG((DEBUG_INFO, "    DWD    - 0x%x\n", CapReg->Bits.DWD));
+  DEBUG((DEBUG_INFO, "    DRD    - 0x%x\n", CapReg->Bits.DRD));
+  DEBUG((DEBUG_INFO, "    FL1GP  - 0x%x\n", CapReg->Bits.FL1GP));
+  DEBUG((DEBUG_INFO, "    PI     - 0x%x\n", CapReg->Bits.PI));
+}
+
+/**
+  Dump VTd extended capability registers.
+
+  Prints the raw ECAP_REG value followed by each bit-field to the debug log.
+
+  @param[in]  ECapReg           The extended capability register.
+**/
+VOID
+DumpVtdECapRegs (
+  IN VTD_ECAP_REG *ECapReg
+  )
+{
+  DEBUG((DEBUG_INFO, "  ECapReg  - 0x%x\n", ECapReg->Uint64));
+  DEBUG((DEBUG_INFO, "    C      - 0x%x\n", ECapReg->Bits.C));
+  DEBUG((DEBUG_INFO, "    QI     - 0x%x\n", ECapReg->Bits.QI));
+  DEBUG((DEBUG_INFO, "    DT     - 0x%x\n", ECapReg->Bits.DT));
+  DEBUG((DEBUG_INFO, "    IR     - 0x%x\n", ECapReg->Bits.IR));
+  DEBUG((DEBUG_INFO, "    EIM    - 0x%x\n", ECapReg->Bits.EIM));
+  DEBUG((DEBUG_INFO, "    PT     - 0x%x\n", ECapReg->Bits.PT));
+  DEBUG((DEBUG_INFO, "    SC     - 0x%x\n", ECapReg->Bits.SC));
+  DEBUG((DEBUG_INFO, "    IRO    - 0x%x\n", ECapReg->Bits.IRO));
+  DEBUG((DEBUG_INFO, "    MHMV   - 0x%x\n", ECapReg->Bits.MHMV));
+  DEBUG((DEBUG_INFO, "    MTS    - 0x%x\n", ECapReg->Bits.MTS));
+  DEBUG((DEBUG_INFO, "    NEST   - 0x%x\n", ECapReg->Bits.NEST));
+  DEBUG((DEBUG_INFO, "    PASID  - 0x%x\n", ECapReg->Bits.PASID));
+  DEBUG((DEBUG_INFO, "    PRS    - 0x%x\n", ECapReg->Bits.PRS));
+  DEBUG((DEBUG_INFO, "    ERS    - 0x%x\n", ECapReg->Bits.ERS));
+  DEBUG((DEBUG_INFO, "    SRS    - 0x%x\n", ECapReg->Bits.SRS));
+  DEBUG((DEBUG_INFO, "    NWFS   - 0x%x\n", ECapReg->Bits.NWFS));
+  DEBUG((DEBUG_INFO, "    EAFS   - 0x%x\n", ECapReg->Bits.EAFS));
+  DEBUG((DEBUG_INFO, "    PSS    - 0x%x\n", ECapReg->Bits.PSS));
+  DEBUG((DEBUG_INFO, "    SMTS   - 0x%x\n", ECapReg->Bits.SMTS));
+  DEBUG((DEBUG_INFO, "    ADMS   - 0x%x\n", ECapReg->Bits.ADMS));
+}
+
+/**
+  Dump VTd registers.
+
+  Snapshots the engine's registers (including all fault recording registers)
+  into the shared mVtdRegsInfoBuffer, prints them, and records the snapshot
+  in the VTd event log.
+
+  @param[in]  VtdUnitBaseAddress  The base address of the VTd engine.
+**/
+VOID
+DumpVtdRegs (
+  IN UINTN  VtdUnitBaseAddress
+  )
+{
+  VTD_REGESTER_INFO *VtdRegInfo;
+  VTD_ECAP_REG      ECapReg;
+  VTD_CAP_REG       CapReg;
+
+  // The buffer is allocated by PrepareVtdConfig (); nothing to dump before that.
+  if (mVtdRegsInfoBuffer == NULL) {
+    return;
+  }
+
+  VtdRegInfo = mVtdRegsInfoBuffer;
+  VtdRegInfo->BaseAddress = VtdUnitBaseAddress;
+  VtdRegInfo->VerReg    = MmioRead32 (VtdUnitBaseAddress + R_VER_REG);
+  VtdRegInfo->CapReg    = MmioRead64 (VtdUnitBaseAddress + R_CAP_REG);
+  VtdRegInfo->EcapReg   = MmioRead64 (VtdUnitBaseAddress + R_ECAP_REG);
+  VtdRegInfo->GstsReg   = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  VtdRegInfo->RtaddrReg = MmioRead64 (VtdUnitBaseAddress + R_RTADDR_REG);
+  VtdRegInfo->CcmdReg   = MmioRead64 (VtdUnitBaseAddress + R_CCMD_REG);
+  VtdRegInfo->FstsReg   = MmioRead32 (VtdUnitBaseAddress + R_FSTS_REG);
+  VtdRegInfo->FectlReg  = MmioRead32 (VtdUnitBaseAddress + R_FECTL_REG);
+  VtdRegInfo->FedataReg = MmioRead32 (VtdUnitBaseAddress + R_FEDATA_REG);
+  VtdRegInfo->FeaddrReg = MmioRead32 (VtdUnitBaseAddress + R_FEADDR_REG);
+  VtdRegInfo->FeuaddrReg = MmioRead32 (VtdUnitBaseAddress + R_FEUADDR_REG);
+  VtdRegInfo->IqercdReg = MmioRead64 (VtdUnitBaseAddress + R_IQERCD_REG);
+
+  // Fault recording registers: CAP.NFR is "count - 1"; each is 128 bits wide
+  // at offset CAP.FRO * 16 from the base.
+  CapReg.Uint64 = VtdRegInfo->CapReg;
+  for (VtdRegInfo->FrcdRegNum = 0; VtdRegInfo->FrcdRegNum < (UINT16) CapReg.Bits.NFR + 1; VtdRegInfo->FrcdRegNum++) {
+    VtdRegInfo->FrcdReg[VtdRegInfo->FrcdRegNum].Uint64Lo = MmioRead64 (VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (VtdRegInfo->FrcdRegNum * 16) + R_FRCD_REG));
+    VtdRegInfo->FrcdReg[VtdRegInfo->FrcdRegNum].Uint64Hi = MmioRead64 (VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (VtdRegInfo->FrcdRegNum * 16) + R_FRCD_REG + sizeof(UINT64)));
+  }
+
+  // IVA/IOTLB live at offset ECAP.IRO * 16 from the base.
+  ECapReg.Uint64 = VtdRegInfo->EcapReg;
+  VtdRegInfo->IvaReg = MmioRead64 (VtdUnitBaseAddress + (ECapReg.Bits.IRO * 16) + R_IVA_REG);
+  VtdRegInfo->IotlbReg = MmioRead64 (VtdUnitBaseAddress + (ECapReg.Bits.IRO * 16) + R_IOTLB_REG);
+
+  DEBUG((DEBUG_INFO, "#### DumpVtdRegs(0x%016lx) Begin ####\n", VtdUnitBaseAddress));
+
+  VtdLibDumpVtdRegsAll (NULL, NULL, VtdRegInfo);
+
+  DEBUG((DEBUG_INFO, "#### DumpVtdRegs(0x%016lx) End ####\n", VtdUnitBaseAddress));
+
+  VTdLogAddDataEvent (VTDLOG_DXE_REGISTER, VTDLOG_REGISTER_ALL, (VOID *) VtdRegInfo, sizeof (VTD_REGESTER_INFO) + sizeof (VTD_UINT128) * (VtdRegInfo->FrcdRegNum - 1));
+}
+
+/**
+  Dump VTd registers for all VTd engine.
+**/
+VOID
+DumpVtdRegsAll (
+  VOID
+  )
+{
+  UINTN  EngineIndex;
+
+  EngineIndex = 0;
+  while (EngineIndex < mVtdUnitNumber) {
+    DumpVtdRegs (mVtdUnitInformation[EngineIndex].VtdUnitBaseAddress);
+    EngineIndex++;
+  }
+}
+
+/**
+  Dump VTd registers if there is error.
+
+  Scans every engine for pending faults (FSTS non-zero, FECTL interrupt mask
+  bit, or any fault recording register with its F bit set). On error, reports
+  a status code, dumps the engine's registers, then clears the fault records
+  and the fault status register (both are write-one-to-clear).
+**/
+VOID
+DumpVtdIfError (
+  VOID
+  )
+{
+  UINTN         Num;
+  UINTN         Index;
+  VTD_FRCD_REG  FrcdReg;
+  VTD_CAP_REG   CapReg;
+  UINT32        Reg32;
+  BOOLEAN       HasError;
+
+  for (Num = 0; Num < mVtdUnitNumber; Num++) {
+    HasError = FALSE;
+    Reg32 = MmioRead32 (mVtdUnitInformation[Num].VtdUnitBaseAddress + R_FSTS_REG);
+    if (Reg32 != 0) {
+      HasError = TRUE;
+    }
+    Reg32 = MmioRead32 (mVtdUnitInformation[Num].VtdUnitBaseAddress + R_FECTL_REG);
+    if ((Reg32 & BIT30) != 0) {
+      HasError = TRUE;
+    }
+
+    CapReg.Uint64 = MmioRead64 (mVtdUnitInformation[Num].VtdUnitBaseAddress + R_CAP_REG);
+    for (Index = 0; Index < (UINTN)CapReg.Bits.NFR + 1; Index++) {
+      FrcdReg.Uint64[0] = MmioRead64 (mVtdUnitInformation[Num].VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (Index * 16) + R_FRCD_REG));
+      FrcdReg.Uint64[1] = MmioRead64 (mVtdUnitInformation[Num].VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (Index * 16) + R_FRCD_REG + sizeof(UINT64)));
+      if (FrcdReg.Bits.F != 0) {
+        HasError = TRUE;
+      }
+    }
+
+    if (HasError) {
+      REPORT_STATUS_CODE (EFI_ERROR_CODE, PcdGet32 (PcdErrorCodeVTdError));
+      DEBUG((DEBUG_INFO, "\n#### ERROR ####\n"));
+      //
+      // BUGFIX: DumpVtdRegs () expects the engine's MMIO base address; the
+      // engine index (Num) was previously passed instead.
+      //
+      DumpVtdRegs (mVtdUnitInformation[Num].VtdUnitBaseAddress);
+      DEBUG((DEBUG_INFO, "#### ERROR ####\n\n"));
+      //
+      // Clear
+      //
+      for (Index = 0; Index < (UINTN)CapReg.Bits.NFR + 1; Index++) {
+        FrcdReg.Uint64[1] = MmioRead64 (mVtdUnitInformation[Num].VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (Index * 16) + R_FRCD_REG + sizeof(UINT64)));
+        if (FrcdReg.Bits.F != 0) {
+          //
+          // Software writes the value read from this field (F) to Clear it.
+          //
+          MmioWrite64 (mVtdUnitInformation[Num].VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (Index * 16) + R_FRCD_REG + sizeof(UINT64)), FrcdReg.Uint64[1]);
+        }
+      }
+      MmioWrite32 (mVtdUnitInformation[Num].VtdUnitBaseAddress + R_FSTS_REG, MmioRead32 (mVtdUnitInformation[Num].VtdUnitBaseAddress + R_FSTS_REG));
+    }
+  }
+}
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/DmarTable.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/DmarTable.c
new file mode 100644
index 000000000..5672f4014
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/DmarTable.c
@@ -0,0 +1,301 @@
+/** @file
+
+ Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Uefi.h>
+#include <PiPei.h>
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/DebugLib.h>
+#include <Library/HobLib.h>
+#include <Library/PciSegmentLib.h>
+#include <IndustryStandard/Vtd.h>
+#include <IndustryStandard/Pci.h>
+#include <Protocol/IoMmu.h>
+#include <Ppi/VtdInfo.h>
+#include <Guid/VtdLogDataHob.h>
+#include "IntelVTdCorePei.h"
+
+/**
+  Dump DMAR DeviceScopeEntry.
+
+  Prints the entry header and each PCI path element (device/function pairs
+  trailing the fixed header) to the debug log.
+
+  @param[in]  DmarDeviceScopeEntry  DMAR DeviceScopeEntry
+**/
+VOID
+DumpDmarDeviceScopeEntry (
+  IN EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER     *DmarDeviceScopeEntry
+  )
+{
+  UINTN                   PciPathNumber;
+  UINTN                   PciPathIndex;
+  EFI_ACPI_DMAR_PCI_PATH  *PciPath;
+
+  if (DmarDeviceScopeEntry == NULL) {
+    return;
+  }
+
+  DEBUG ((DEBUG_INFO,
+    "    *************************************************************************\n"
+    ));
+  DEBUG ((DEBUG_INFO,
+    "    *       DMA-Remapping Device Scope Entry Structure                      *\n"
+    ));
+  DEBUG ((DEBUG_INFO,
+    "    *************************************************************************\n"
+    ));
+  DEBUG ((DEBUG_INFO,
+    (sizeof (UINTN) == sizeof (UINT64)) ?
+    "    DMAR Device Scope Entry address ...................... 0x%016lx\n" :
+    "    DMAR Device Scope Entry address ...................... 0x%08x\n",
+    DmarDeviceScopeEntry
+    ));
+  DEBUG ((DEBUG_INFO,
+    "      Device Scope Entry Type ............................ 0x%02x\n",
+    DmarDeviceScopeEntry->Type
+    ));
+  switch (DmarDeviceScopeEntry->Type) {
+  case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_ENDPOINT:
+    DEBUG ((DEBUG_INFO,
+      "        PCI Endpoint Device\n"
+      ));
+    break;
+  case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_BRIDGE:
+    //
+    // BUGFIX: corrected "Sub-hierachy" spelling in the log output.
+    //
+    DEBUG ((DEBUG_INFO,
+      "        PCI Sub-hierarchy\n"
+      ));
+    break;
+  default:
+    break;
+  }
+  DEBUG ((DEBUG_INFO,
+    "      Length ............................................. 0x%02x\n",
+    DmarDeviceScopeEntry->Length
+    ));
+  DEBUG ((DEBUG_INFO,
+    "      Enumeration ID ..................................... 0x%02x\n",
+    DmarDeviceScopeEntry->EnumerationId
+    ));
+  DEBUG ((DEBUG_INFO,
+    "      Starting Bus Number ................................ 0x%02x\n",
+    DmarDeviceScopeEntry->StartBusNumber
+    ));
+
+  //
+  // PCI path elements occupy the space after the fixed header.
+  //
+  PciPathNumber = (DmarDeviceScopeEntry->Length - sizeof (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER)) / sizeof (EFI_ACPI_DMAR_PCI_PATH);
+  PciPath = (EFI_ACPI_DMAR_PCI_PATH *) (DmarDeviceScopeEntry + 1);
+  for (PciPathIndex = 0; PciPathIndex < PciPathNumber; PciPathIndex++) {
+    DEBUG ((DEBUG_INFO,
+      "      Device ............................................. 0x%02x\n",
+      PciPath[PciPathIndex].Device
+      ));
+    DEBUG ((DEBUG_INFO,
+      "      Function ........................................... 0x%02x\n",
+      PciPath[PciPathIndex].Function
+      ));
+  }
+
+  DEBUG ((DEBUG_INFO,
+    "    *************************************************************************\n\n"
+    ));
+
+  return;
+}
+
+/**
+  Dump DMAR DRHD table.
+
+  Prints the DRHD header, then walks and dumps every trailing device scope
+  entry. A zero-length entry terminates the walk to avoid an infinite loop
+  on malformed tables.
+
+  @param[in]  Drhd              DMAR DRHD table
+**/
+VOID
+DumpDmarDrhd (
+  IN EFI_ACPI_DMAR_DRHD_HEADER  *Drhd
+  )
+{
+  EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER       *DmarDeviceScopeEntry;
+  INTN                                              DrhdLen;
+
+  if (Drhd == NULL) {
+    return;
+  }
+
+  DEBUG ((DEBUG_INFO,
+    "  ***************************************************************************\n"
+    ));
+  DEBUG ((DEBUG_INFO,
+    "  *       DMA-Remapping Hardware Definition Structure                       *\n"
+    ));
+  DEBUG ((DEBUG_INFO,
+    "  ***************************************************************************\n"
+    ));
+  DEBUG ((DEBUG_INFO,
+    (sizeof (UINTN) == sizeof (UINT64)) ?
+    "  DRHD address ........................................... 0x%016lx\n" :
+    "  DRHD address ........................................... 0x%08x\n",
+    Drhd
+    ));
+  DEBUG ((DEBUG_INFO,
+    "    Type ................................................. 0x%04x\n",
+    Drhd->Header.Type
+    ));
+  DEBUG ((DEBUG_INFO,
+    "    Length ............................................... 0x%04x\n",
+    Drhd->Header.Length
+    ));
+  DEBUG ((DEBUG_INFO,
+    "    Flags ................................................ 0x%02x\n",
+    Drhd->Flags
+    ));
+  DEBUG ((DEBUG_INFO,
+    "      INCLUDE_PCI_ALL .................................... 0x%02x\n",
+    Drhd->Flags & EFI_ACPI_DMAR_DRHD_FLAGS_INCLUDE_PCI_ALL
+    ));
+  DEBUG ((DEBUG_INFO,
+    "    Segment Number ....................................... 0x%04x\n",
+    Drhd->SegmentNumber
+    ));
+  DEBUG ((DEBUG_INFO,
+    "    Register Base Address ................................ 0x%016lx\n",
+    Drhd->RegisterBaseAddress
+    ));
+
+  DrhdLen  = Drhd->Header.Length - sizeof (EFI_ACPI_DMAR_DRHD_HEADER);
+  DmarDeviceScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *) (Drhd + 1);
+  while (DrhdLen > 0) {
+    DumpDmarDeviceScopeEntry (DmarDeviceScopeEntry);
+    if (DmarDeviceScopeEntry->Length == 0) {
+      //
+      // ROBUSTNESS: a malformed zero-length entry would never advance the
+      // cursor; bail out instead of looping forever.
+      //
+      break;
+    }
+    DrhdLen -= DmarDeviceScopeEntry->Length;
+    DmarDeviceScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *) ((UINTN) DmarDeviceScopeEntry + DmarDeviceScopeEntry->Length);
+  }
+
+  DEBUG ((DEBUG_INFO,
+    "  ***************************************************************************\n\n"
+    ));
+
+  return;
+}
+
+/**
+  Dump DMAR ACPI table.
+
+  Prints the table header fields and every DRHD sub-structure. A zero-length
+  sub-structure terminates the walk to avoid an infinite loop on malformed
+  tables.
+
+  @param[in]  Dmar              DMAR ACPI table
+**/
+VOID
+DumpAcpiDMAR (
+  IN EFI_ACPI_DMAR_HEADER       *Dmar
+  )
+{
+  EFI_ACPI_DMAR_STRUCTURE_HEADER        *DmarHeader;
+  INTN                                  DmarLen;
+
+  if (Dmar == NULL) {
+    return;
+  }
+
+  //
+  // Dump Dmar table
+  //
+  DEBUG ((DEBUG_INFO,
+    "*****************************************************************************\n"
+    ));
+  DEBUG ((DEBUG_INFO,
+    "*         DMAR Table                                                        *\n"
+    ));
+  DEBUG ((DEBUG_INFO,
+    "*****************************************************************************\n"
+    ));
+
+  DEBUG ((DEBUG_INFO,
+    (sizeof (UINTN) == sizeof (UINT64)) ?
+    "DMAR address ............................................. 0x%016lx\n" :
+    "DMAR address ............................................. 0x%08x\n",
+    Dmar
+    ));
+
+  DEBUG ((DEBUG_INFO,
+    "  Table Contents:\n"
+    ));
+  DEBUG ((DEBUG_INFO,
+    "    Host Address Width ................................... 0x%02x\n",
+    Dmar->HostAddressWidth
+    ));
+  DEBUG ((DEBUG_INFO,
+    "    Flags ................................................ 0x%02x\n",
+    Dmar->Flags
+    ));
+  DEBUG ((DEBUG_INFO,
+    "      INTR_REMAP ......................................... 0x%02x\n",
+    Dmar->Flags & EFI_ACPI_DMAR_FLAGS_INTR_REMAP
+    ));
+  DEBUG ((DEBUG_INFO,
+    "      X2APIC_OPT_OUT_SET ................................. 0x%02x\n",
+    Dmar->Flags & EFI_ACPI_DMAR_FLAGS_X2APIC_OPT_OUT
+    ));
+  DEBUG ((DEBUG_INFO,
+    "      DMA_CTRL_PLATFORM_OPT_IN_FLAG ...................... 0x%02x\n",
+    Dmar->Flags & EFI_ACPI_DMAR_FLAGS_DMA_CTRL_PLATFORM_OPT_IN_FLAG
+    ));
+
+  DmarLen  = Dmar->Header.Length - sizeof (EFI_ACPI_DMAR_HEADER);
+  DmarHeader = (EFI_ACPI_DMAR_STRUCTURE_HEADER *) (Dmar + 1);
+  while (DmarLen > 0) {
+    switch (DmarHeader->Type) {
+    case EFI_ACPI_DMAR_TYPE_DRHD:
+      DumpDmarDrhd ((EFI_ACPI_DMAR_DRHD_HEADER *) DmarHeader);
+      break;
+    default:
+      break;
+    }
+    if (DmarHeader->Length == 0) {
+      //
+      // ROBUSTNESS: a malformed zero-length sub-structure would never
+      // advance the cursor; bail out instead of looping forever.
+      //
+      break;
+    }
+    DmarLen -= DmarHeader->Length;
+    DmarHeader = (EFI_ACPI_DMAR_STRUCTURE_HEADER *) ((UINTN) DmarHeader + DmarHeader->Length);
+  }
+
+  DEBUG ((DEBUG_INFO,
+    "*****************************************************************************\n\n"
+    ));
+
+  return;
+}
+
+/**
+  Parse DMAR DRHD table.
+
+  Walks the DMAR sub-structures and invokes Callback for every DRHD entry,
+  passing its zero-based index. A zero-length sub-structure terminates the
+  walk to avoid an infinite loop on malformed tables.
+
+  @param[in]  AcpiDmarTable     DMAR ACPI table
+  @param[in]  Callback          Callback function for handle DRHD
+  @param[in]  Context           Callback function Context
+
+  @return the VTd engine number.
+
+**/
+UINTN
+ParseDmarAcpiTableDrhd (
+  IN EFI_ACPI_DMAR_HEADER       *AcpiDmarTable,
+  IN PROCESS_DRHD_CALLBACK_FUNC Callback,
+  IN VOID                       *Context
+  )
+{
+  EFI_ACPI_DMAR_STRUCTURE_HEADER        *DmarHeader;
+  UINTN                                 VtdIndex;
+
+  VtdIndex = 0;
+  DmarHeader = (EFI_ACPI_DMAR_STRUCTURE_HEADER *) ((UINTN) (AcpiDmarTable + 1));
+
+  while ((UINTN) DmarHeader < (UINTN) AcpiDmarTable + AcpiDmarTable->Header.Length) {
+    switch (DmarHeader->Type) {
+    case EFI_ACPI_DMAR_TYPE_DRHD:
+      if (Callback != NULL) {
+        Callback (Context, VtdIndex, (EFI_ACPI_DMAR_DRHD_HEADER *) DmarHeader);
+      }
+      VtdIndex++;
+      break;
+    default:
+      break;
+    }
+    if (DmarHeader->Length == 0) {
+      //
+      // ROBUSTNESS: a malformed zero-length sub-structure would never
+      // advance the cursor; bail out instead of looping forever.
+      //
+      break;
+    }
+    DmarHeader = (EFI_ACPI_DMAR_STRUCTURE_HEADER *) ((UINTN) DmarHeader + DmarHeader->Length);
+  }
+
+  return VtdIndex;
+}
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.c
new file mode 100644
index 000000000..0160c3604
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.c
@@ -0,0 +1,1099 @@
+/** @file
+
+ Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Uefi.h>
+#include <PiPei.h>
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/IoLib.h>
+#include <Library/DebugLib.h>
+#include <Library/PeiServicesLib.h>
+#include <Library/HobLib.h>
+#include <IndustryStandard/Vtd.h>
+#include <Ppi/IoMmu.h>
+#include <Ppi/VtdInfo.h>
+#include <Ppi/MemoryDiscovered.h>
+#include <Ppi/EndOfPeiPhase.h>
+#include <Guid/VtdPmrInfoHob.h>
+#include <Guid/VtdLogDataHob.h>
+#include "IntelVTdCorePei.h"
+
+//
+// Maximum number of VTd engines (DRHD entries) this driver can track.
+//
+#define VTD_UNIT_MAX 64
+
+//
+// GUID of the hob carrying the VTD_INFO engine context (see GetVTdInfoHob).
+//
+EFI_GUID mVTdInfoGuid = {
+  0x222f5e30, 0x5cd, 0x49c6, { 0x8a, 0xc, 0x36, 0xd6, 0x58, 0x41, 0xe0, 0x82 }
+};
+
+//
+// GUID of the hob carrying the DMA_BUFFER_INFO bookkeeping data.
+//
+EFI_GUID mDmaBufferInfoGuid = {
+  0x7b624ec7, 0xfb67, 0x4f9c, { 0xb6, 0xb0, 0x4d, 0xfa, 0x9c, 0x88, 0x20, 0x39 }
+};
+
+#define MAP_INFO_SIGNATURE SIGNATURE_32 ('D', 'M', 'A', 'P')
+//
+// Bookkeeping record stored immediately after each mapped DMA region;
+// its address is handed back to the caller as the opaque Mapping value
+// of PeiIoMmuMap() and validated again in PeiIoMmuUnmap().
+//
+typedef struct {
+  UINT32 Signature;
+  EDKII_IOMMU_OPERATION Operation;
+  UINTN NumberOfBytes;
+  EFI_PHYSICAL_ADDRESS HostAddress;
+  EFI_PHYSICAL_ADDRESS DeviceAddress;
+} MAP_INFO;
+
+/**
+  Allocate memory buffer for VTd log events.
+
+  Carves MemorySize bytes out of the post-memory log buffer recorded in
+  the gVTdLogBufferHobGuid hob.
+
+  @param[in] MemorySize    Required memory buffer size.
+
+  @retval Buffer address, or NULL when the log hob does not exist, the
+          post-memory buffer has not been allocated yet, or the buffer is
+          full (in which case VTD_LOG_ERROR_BUFFER_FULL is recorded).
+
+**/
+UINT8 *
+EFIAPI
+VTdLogAllocMemory (
+  IN CONST UINT32 MemorySize
+  )
+{
+  VOID *HobPtr;
+  VTDLOG_PEI_BUFFER_HOB *BufferHob;
+  UINT8 *ReturnBuffer;
+
+  ReturnBuffer = NULL;
+  HobPtr = GetFirstGuidHob (&gVTdLogBufferHobGuid);
+  if (HobPtr != NULL) {
+    BufferHob = GET_GUID_HOB_DATA (HobPtr);
+
+    if (BufferHob->PostMemBuffer != 0) {
+      //
+      // Post-memory phase: bump-allocate from the shared log buffer.
+      //
+      if ((BufferHob->PostMemBufferUsed + MemorySize) < PcdGet32 (PcdVTdPeiPostMemLogBufferSize)) {
+        ReturnBuffer = &((UINT8 *) (UINTN) BufferHob->PostMemBuffer)[BufferHob->PostMemBufferUsed];
+        BufferHob->PostMemBufferUsed += MemorySize;
+      } else {
+        //
+        // Buffer exhausted; flag it so the consumer knows events were lost.
+        //
+        BufferHob->VtdLogPeiError |= VTD_LOG_ERROR_BUFFER_FULL;
+      }
+    }
+  }
+
+  return ReturnBuffer;
+}
+
+/**
+  Add the VTd log event in post memory phase.
+
+  The event is silently dropped when logging is disabled by PcdVTdLogLevel,
+  when the event type is filtered out at level 1, or when the log buffer
+  cannot supply memory.
+
+  @param[in] EventType   Event type
+  @param[in] Data1       First parameter
+  @param[in] Data2       Second parameter
+
+**/
+VOID
+EFIAPI
+VTdLogAddEvent (
+  IN CONST VTDLOG_EVENT_TYPE EventType,
+  IN CONST UINT64 Data1,
+  IN CONST UINT64 Data2
+  )
+{
+  VTDLOG_EVENT_2PARAM *Item;
+
+  if (PcdGet8 (PcdVTdLogLevel) == 0) {
+    return;
+  } else if ((PcdGet8 (PcdVTdLogLevel) == 1) && (EventType >= VTDLOG_PEI_ADVANCED)) {
+    return;
+  }
+
+  Item = (VTDLOG_EVENT_2PARAM *) VTdLogAllocMemory (sizeof (VTDLOG_EVENT_2PARAM));
+  if (Item != NULL) {
+    Item->Data1 = Data1;
+    Item->Data2 = Data2;
+    Item->Header.DataSize = sizeof (VTDLOG_EVENT_2PARAM);
+    //
+    // LogType is a 64-bit bitmask. "(UINT64) (1 << EventType)" performs a
+    // 32-bit int shift first, which is undefined for EventType >= 31 and
+    // sign-extends bit 31; use LShiftU64 for a true 64-bit shift.
+    //
+    Item->Header.LogType = LShiftU64 (1, EventType);
+    Item->Header.Timestamp = AsmReadTsc ();
+  }
+}
+
+/**
+  Add a new VTd log event with data.
+
+  The event is silently dropped when logging is disabled by PcdVTdLogLevel,
+  when the event type is filtered out at level 1, or when the log buffer
+  cannot supply memory.
+
+  @param[in] EventType   Event type
+  @param[in] Param       parameter
+  @param[in] Data        Data
+  @param[in] DataSize    Data size
+
+**/
+VOID
+EFIAPI
+VTdLogAddDataEvent (
+  IN CONST VTDLOG_EVENT_TYPE EventType,
+  IN CONST UINT64 Param,
+  IN CONST VOID *Data,
+  IN CONST UINT32 DataSize
+  )
+{
+  VTDLOG_EVENT_CONTEXT *Item;
+  UINT32 EventSize;
+
+  if (PcdGet8 (PcdVTdLogLevel) == 0) {
+    return;
+  } else if ((PcdGet8 (PcdVTdLogLevel) == 1) && (EventType >= VTDLOG_PEI_ADVANCED)) {
+    return;
+  }
+
+  //
+  // VTDLOG_EVENT_CONTEXT already contains the first data byte (Data[1]),
+  // hence the "- 1".
+  //
+  EventSize = sizeof (VTDLOG_EVENT_CONTEXT) + DataSize - 1;
+
+  Item = (VTDLOG_EVENT_CONTEXT *) VTdLogAllocMemory (EventSize);
+  if (Item != NULL) {
+    Item->Param = Param;
+    CopyMem (Item->Data, Data, DataSize);
+
+    Item->Header.DataSize = EventSize;
+    //
+    // LogType is a 64-bit bitmask. "(UINT64) (1 << EventType)" performs a
+    // 32-bit int shift first, which is undefined for EventType >= 31 and
+    // sign-extends bit 31; use LShiftU64 for a true 64-bit shift.
+    //
+    Item->Header.LogType = LShiftU64 (1, EventType);
+    Item->Header.Timestamp = AsmReadTsc ();
+  }
+}
+
+/**
+  Add the VTd log event in pre-memory phase.
+
+  Pre-memory there is no dynamic log buffer, so the event is stored in a
+  fixed-size per-engine slot array inside the log buffer hob.
+
+  @param[in] VtdUnitBaseAddress        The base address of the VTd engine.
+  @param[in] Mode                      Pre-memory DMA protection mode.
+  @param[in] Status                    Status
+
+**/
+VOID
+EFIAPI
+VTdLogAddPreMemoryEvent (
+  IN UINTN VtdUnitBaseAddress,
+  IN UINT8 Mode,
+  IN UINT8 Status
+  )
+{
+  VTDLOG_PEI_BUFFER_HOB *BufferHob;
+  VOID *HobPtr;
+  UINT8 Index;
+
+  HobPtr = GetFirstGuidHob (&gVTdLogBufferHobGuid);
+  if (HobPtr != NULL) {
+    BufferHob = GET_GUID_HOB_DATA (HobPtr);
+
+    for (Index = 0; Index < VTD_LOG_PEI_PRE_MEM_BAR_MAX; Index++) {
+      if (BufferHob->PreMemInfo[Index].Mode == VTD_LOG_PEI_PRE_MEM_NOT_USED) {
+        //
+        // Found a free position; record the engine state and stop.
+        // If all slots are taken the event is silently dropped.
+        //
+        BufferHob->PreMemInfo[Index].BarAddress = (UINT32) VtdUnitBaseAddress;
+        BufferHob->PreMemInfo[Index].Mode = Mode;
+        BufferHob->PreMemInfo[Index].Status = Status;
+        break;
+      }
+    }
+  }
+}
+
+/**
+  Initializes the VTd Log.
+
+  Creates the log buffer hob on first call (any phase) and, in the
+  post-memory phase, allocates the post-memory event buffer sized by
+  PcdVTdPeiPostMemLogBufferSize. Safe to call more than once.
+
+  @param[in] MemoryInitialized  TRUE: It is post-memory phase
+                                FALSE: It is pre-memory phase
+**/
+VOID
+EFIAPI
+VTdLogInitialize(
+  BOOLEAN MemoryInitialized
+  )
+{
+  VTDLOG_PEI_BUFFER_HOB *BufferHob;
+  VOID *HobPtr;
+
+  if (PcdGet8 (PcdVTdLogLevel) > 0) {
+    HobPtr = GetFirstGuidHob (&gVTdLogBufferHobGuid);
+    if (HobPtr == NULL) {
+      BufferHob = BuildGuidHob (&gVTdLogBufferHobGuid, sizeof (VTDLOG_PEI_BUFFER_HOB));
+      ASSERT (BufferHob != NULL);
+
+      ZeroMem (BufferHob, sizeof (VTDLOG_PEI_BUFFER_HOB));
+    } else {
+      BufferHob = GET_GUID_HOB_DATA (HobPtr);
+    }
+
+    if (MemoryInitialized) {
+      //
+      // Allocate the post-memory event buffer once; PostMemBuffer != 0
+      // marks it as already set up.
+      //
+      if ((BufferHob->PostMemBuffer == 0) && (PcdGet32 (PcdVTdPeiPostMemLogBufferSize) > 0)) {
+        BufferHob->PostMemBufferUsed = 0;
+        BufferHob->PostMemBuffer = (UINTN) AllocateAlignedPages (EFI_SIZE_TO_PAGES (PcdGet32 (PcdVTdPeiPostMemLogBufferSize)), sizeof (UINT8));
+      }
+    }
+  }
+}
+
+/**
+  Set IOMMU attribute for a system memory.
+
+  If the IOMMU PPI exists, the system memory cannot be used
+  for DMA by default.
+
+  When a device requests a DMA access for a system memory,
+  the device driver need use SetAttribute() to update the IOMMU
+  attribute to request DMA access (read and/or write).
+
+  NOTE(review): this implementation only verifies that the DMA buffer is
+  available and dumps/clears pending VTd errors; IoMmuAccess itself is not
+  validated or applied — presumably the PEI translation tables already grant
+  access within the DMA buffer. Confirm this is intentional.
+
+  @param[in]  This                  The PPI instance pointer.
+  @param[in]  DeviceHandle          The device who initiates the DMA access request.
+  @param[in]  Mapping               The mapping value returned from Map().
+  @param[in]  IoMmuAccess           The IOMMU access.
+
+  @retval EFI_SUCCESS            The IoMmuAccess is set for the memory range specified by DeviceAddress and Length.
+  @retval EFI_INVALID_PARAMETER  Mapping is not a value that was returned by Map().
+  @retval EFI_INVALID_PARAMETER  IoMmuAccess specified an illegal combination of access.
+  @retval EFI_UNSUPPORTED        The bit mask of IoMmuAccess is not supported by the IOMMU.
+  @retval EFI_UNSUPPORTED        The IOMMU does not support the memory range specified by Mapping.
+  @retval EFI_OUT_OF_RESOURCES   There are not enough resources available to modify the IOMMU access.
+  @retval EFI_DEVICE_ERROR       The IOMMU device reported an error while attempting the operation.
+  @retval EFI_NOT_AVAILABLE_YET  DMA protection has been enabled, but DMA buffer are
+                                 not available to be allocated yet.
+**/
+EFI_STATUS
+EFIAPI
+PeiIoMmuSetAttribute (
+  IN EDKII_IOMMU_PPI *This,
+  IN VOID *Mapping,
+  IN UINT64 IoMmuAccess
+  )
+{
+  VOID *Hob;
+  DMA_BUFFER_INFO *DmaBufferInfo;
+
+  //
+  // check and clear VTd error
+  //
+  DumpVtdIfError ();
+
+  DEBUG ((DEBUG_INFO, "PeiIoMmuSetAttribute:\n"));
+
+  Hob = GetFirstGuidHob (&mDmaBufferInfoGuid);
+  DmaBufferInfo = GET_GUID_HOB_DATA(Hob);
+
+  //
+  // DmaBufferCurrentTop == 0 means the DMA buffer is not set up yet.
+  //
+  if (DmaBufferInfo->DmaBufferCurrentTop == 0) {
+    DEBUG ((DEBUG_INFO, "PeiIoMmuSetAttribute: DmaBufferCurrentTop == 0\n"));
+    return EFI_NOT_AVAILABLE_YET;
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Provides the controller-specific addresses required to access system memory from a
+  DMA bus master.
+
+  Read/write mappings are bounce-buffered: space is carved from the bottom of
+  the DMA buffer, with a MAP_INFO record appended after the data. Common-buffer
+  operations are identity-mapped (the caller must already have allocated the
+  memory from the DMA buffer via AllocateBuffer()).
+
+  @param [in]       This             The PPI instance pointer.
+  @param [in]       Operation        Indicates if the bus master is going to read or write to system memory.
+  @param [in]       HostAddress      The system memory address to map to the PCI controller.
+  @param [in] [out] NumberOfBytes    On input the number of bytes to map. On output the number of bytes
+                                     that were mapped.
+  @param [out]      DeviceAddress    The resulting map address for the bus master PCI controller to use to
+                                     access the hosts HostAddress.
+  @param [out]      Mapping          A resulting value to pass to Unmap().
+
+  @retval EFI_SUCCESS            The range was mapped for the returned NumberOfBytes.
+  @retval EFI_UNSUPPORTED        The HostAddress cannot be mapped as a common buffer.
+  @retval EFI_INVALID_PARAMETER  One or more parameters are invalid.
+  @retval EFI_OUT_OF_RESOURCES   The request could not be completed due to a lack of resources.
+  @retval EFI_DEVICE_ERROR       The system hardware could not map the requested address.
+  @retval EFI_NOT_AVAILABLE_YET  DMA protection has been enabled, but DMA buffer are
+                                 not available to be allocated yet.
+**/
+EFI_STATUS
+EFIAPI
+PeiIoMmuMap (
+  IN EDKII_IOMMU_PPI *This,
+  IN EDKII_IOMMU_OPERATION Operation,
+  IN VOID *HostAddress,
+  IN OUT UINTN *NumberOfBytes,
+  OUT EFI_PHYSICAL_ADDRESS *DeviceAddress,
+  OUT VOID **Mapping
+  )
+{
+  MAP_INFO *MapInfo;
+  UINTN Length;
+  VOID *Hob;
+  DMA_BUFFER_INFO *DmaBufferInfo;
+
+  Hob = GetFirstGuidHob (&mDmaBufferInfoGuid);
+  DmaBufferInfo = GET_GUID_HOB_DATA(Hob);
+
+  DEBUG ((DEBUG_INFO, "PeiIoMmuMap - HostAddress - 0x%x, NumberOfBytes - %x\n", HostAddress, *NumberOfBytes));
+  DEBUG ((DEBUG_INFO, "  DmaBufferCurrentTop - %x\n", DmaBufferInfo->DmaBufferCurrentTop));
+  DEBUG ((DEBUG_INFO, "  DmaBufferCurrentBottom - %x\n", DmaBufferInfo->DmaBufferCurrentBottom));
+  DEBUG ((DEBUG_INFO, "  Operation - %x\n", Operation));
+
+  if (DmaBufferInfo->DmaBufferCurrentTop == 0) {
+    return EFI_NOT_AVAILABLE_YET;
+  }
+
+  if (Operation == EdkiiIoMmuOperationBusMasterCommonBuffer ||
+      Operation == EdkiiIoMmuOperationBusMasterCommonBuffer64) {
+    *DeviceAddress = (UINTN) HostAddress;
+    *Mapping = NULL;
+    return EFI_SUCCESS;
+  }
+
+  Length = *NumberOfBytes + sizeof (MAP_INFO);
+  if (Length > DmaBufferInfo->DmaBufferCurrentTop - DmaBufferInfo->DmaBufferCurrentBottom) {
+    DEBUG ((DEBUG_ERROR, "PeiIoMmuMap - OUT_OF_RESOURCE\n"));
+    VTdLogAddEvent (VTDLOG_PEI_VTD_ERROR, VTD_LOG_PEI_VTD_ERROR_PPI_MAP, Length);
+    ASSERT (FALSE);
+    return EFI_OUT_OF_RESOURCES;
+  }
+
+  *DeviceAddress = DmaBufferInfo->DmaBufferCurrentBottom;
+  DmaBufferInfo->DmaBufferCurrentBottom += Length;
+
+  //
+  // The MAP_INFO record lives directly after the mapped data.
+  //
+  MapInfo = (VOID *) (UINTN) (*DeviceAddress + *NumberOfBytes);
+  MapInfo->Signature = MAP_INFO_SIGNATURE;
+  MapInfo->Operation = Operation;
+  MapInfo->NumberOfBytes = *NumberOfBytes;
+  MapInfo->HostAddress = (UINTN) HostAddress;
+  MapInfo->DeviceAddress = *DeviceAddress;
+  *Mapping = MapInfo;
+  DEBUG ((DEBUG_INFO, "  Op(%x):DeviceAddress - %x, Mapping - %x\n", Operation, (UINTN) *DeviceAddress, MapInfo));
+
+  //
+  // If this is a read operation from the Bus Master's point of view,
+  // then copy the contents of the real buffer into the mapped buffer
+  // so the Bus Master can read the contents of the real buffer.
+  //
+  if (Operation == EdkiiIoMmuOperationBusMasterRead ||
+      Operation == EdkiiIoMmuOperationBusMasterRead64) {
+    CopyMem (
+      (VOID *) (UINTN) MapInfo->DeviceAddress,
+      (VOID *) (UINTN) MapInfo->HostAddress,
+      MapInfo->NumberOfBytes
+      );
+  }
+
+  //
+  // Cast through UINTN first: a direct pointer-to-UINT64 cast is not
+  // portable across 32-bit toolchains (EDK2 convention).
+  //
+  VTdLogAddEvent (VTDLOG_PEI_PPI_MAP, (UINT64) (UINTN) HostAddress, Length);
+  return EFI_SUCCESS;
+}
+
+/**
+  Completes the Map() operation and releases any corresponding resources.
+
+  For write mappings the bounce buffer is copied back to the host buffer.
+  Space is reclaimed only when this mapping is the most recent one (the
+  DMA buffer bottom is a simple bump allocator).
+
+  @param [in]  This                 The PPI instance pointer.
+  @param [in]  Mapping              The mapping value returned from Map().
+
+  @retval EFI_SUCCESS            The range was unmapped.
+  @retval EFI_INVALID_PARAMETER  Mapping is not a value that was returned by Map().
+  @retval EFI_DEVICE_ERROR       The data was not committed to the target system memory.
+  @retval EFI_NOT_AVAILABLE_YET  DMA protection has been enabled, but DMA buffer are
+                                 not available to be allocated yet.
+**/
+EFI_STATUS
+EFIAPI
+PeiIoMmuUnmap (
+  IN EDKII_IOMMU_PPI *This,
+  IN VOID *Mapping
+  )
+{
+  MAP_INFO *MapInfo;
+  UINTN Length;
+  VOID *Hob;
+  DMA_BUFFER_INFO *DmaBufferInfo;
+
+  Hob = GetFirstGuidHob (&mDmaBufferInfoGuid);
+  DmaBufferInfo = GET_GUID_HOB_DATA(Hob);
+
+  DEBUG ((DEBUG_INFO, "PeiIoMmuUnmap - Mapping - %x\n", Mapping));
+  DEBUG ((DEBUG_INFO, "  DmaBufferCurrentTop - %x\n", DmaBufferInfo->DmaBufferCurrentTop));
+  DEBUG ((DEBUG_INFO, "  DmaBufferCurrentBottom - %x\n", DmaBufferInfo->DmaBufferCurrentBottom));
+
+  if (DmaBufferInfo->DmaBufferCurrentTop == 0) {
+    return EFI_NOT_AVAILABLE_YET;
+  }
+
+  //
+  // Common-buffer mappings return NULL from Map(); nothing to undo.
+  //
+  if (Mapping == NULL) {
+    return EFI_SUCCESS;
+  }
+
+  MapInfo = Mapping;
+  ASSERT (MapInfo->Signature == MAP_INFO_SIGNATURE);
+  DEBUG ((DEBUG_INFO, "  Op(%x):DeviceAddress - %x, NumberOfBytes - %x\n", MapInfo->Operation, (UINTN) MapInfo->DeviceAddress, MapInfo->NumberOfBytes));
+
+  //
+  // If this is a write operation from the Bus Master's point of view,
+  // then copy the contents of the mapped buffer into the real buffer
+  // so the processor can read the contents of the real buffer.
+  //
+  if (MapInfo->Operation == EdkiiIoMmuOperationBusMasterWrite ||
+      MapInfo->Operation == EdkiiIoMmuOperationBusMasterWrite64) {
+    CopyMem (
+      (VOID *) (UINTN) MapInfo->HostAddress,
+      (VOID *) (UINTN) MapInfo->DeviceAddress,
+      MapInfo->NumberOfBytes
+      );
+  }
+
+  //
+  // Reclaim space only for LIFO unmaps; out-of-order unmaps leave the
+  // space unused until the buffer is reset.
+  //
+  Length = MapInfo->NumberOfBytes + sizeof (MAP_INFO);
+  if (DmaBufferInfo->DmaBufferCurrentBottom == MapInfo->DeviceAddress + Length) {
+    DmaBufferInfo->DmaBufferCurrentBottom -= Length;
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Allocates pages that are suitable for an OperationBusMasterCommonBuffer or
+  OperationBusMasterCommonBuffer64 mapping.
+
+  Pages are carved from the top of the DMA buffer (Map() consumes the
+  bottom), so common buffers and bounce buffers share one region.
+
+  @param [in]       This             The PPI instance pointer.
+  @param [in]       MemoryType       The type of memory to allocate, EfiBootServicesData or
+                                     EfiRuntimeServicesData.
+  @param [in]       Pages            The number of pages to allocate.
+  @param [in] [out] HostAddress      A pointer to store the base system memory address of the
+                                     allocated range.
+  @param [in]       Attributes       The requested bit mask of attributes for the allocated range.
+
+  @retval EFI_SUCCESS            The requested memory pages were allocated.
+  @retval EFI_UNSUPPORTED        Attributes is unsupported. The only legal attribute bits are
+                                 MEMORY_WRITE_COMBINE, MEMORY_CACHED and DUAL_ADDRESS_CYCLE.
+  @retval EFI_INVALID_PARAMETER  One or more parameters are invalid.
+  @retval EFI_OUT_OF_RESOURCES   The memory pages could not be allocated.
+  @retval EFI_NOT_AVAILABLE_YET  DMA protection has been enabled, but DMA buffer are
+                                 not available to be allocated yet.
+**/
+EFI_STATUS
+EFIAPI
+PeiIoMmuAllocateBuffer (
+  IN EDKII_IOMMU_PPI *This,
+  IN EFI_MEMORY_TYPE MemoryType,
+  IN UINTN Pages,
+  IN OUT VOID **HostAddress,
+  IN UINT64 Attributes
+  )
+{
+  UINTN Length;
+  VOID *Hob;
+  DMA_BUFFER_INFO *DmaBufferInfo;
+
+  Hob = GetFirstGuidHob (&mDmaBufferInfoGuid);
+  DmaBufferInfo = GET_GUID_HOB_DATA(Hob);
+
+  DEBUG ((DEBUG_INFO, "PeiIoMmuAllocateBuffer - page - %x\n", Pages));
+  DEBUG ((DEBUG_INFO, "  DmaBufferCurrentTop - %x\n", DmaBufferInfo->DmaBufferCurrentTop));
+  DEBUG ((DEBUG_INFO, "  DmaBufferCurrentBottom - %x\n", DmaBufferInfo->DmaBufferCurrentBottom));
+
+  if (DmaBufferInfo->DmaBufferCurrentTop == 0) {
+    return EFI_NOT_AVAILABLE_YET;
+  }
+
+  Length = EFI_PAGES_TO_SIZE (Pages);
+  if (Length > DmaBufferInfo->DmaBufferCurrentTop - DmaBufferInfo->DmaBufferCurrentBottom) {
+    DEBUG ((DEBUG_ERROR, "PeiIoMmuAllocateBuffer - OUT_OF_RESOURCE\n"));
+    VTdLogAddEvent (VTDLOG_PEI_VTD_ERROR, VTD_LOG_PEI_VTD_ERROR_PPI_ALLOC, Length);
+    ASSERT (FALSE);
+    return EFI_OUT_OF_RESOURCES;
+  }
+  *HostAddress = (VOID *) (UINTN) (DmaBufferInfo->DmaBufferCurrentTop - Length);
+  DmaBufferInfo->DmaBufferCurrentTop -= Length;
+
+  DEBUG ((DEBUG_INFO, "PeiIoMmuAllocateBuffer - allocate - %x\n", *HostAddress));
+
+  //
+  // Cast through UINTN first: a direct pointer-to-UINT64 cast is not
+  // portable across 32-bit toolchains (EDK2 convention).
+  //
+  VTdLogAddEvent (VTDLOG_PEI_PPI_ALLOC_BUFFER, (UINT64) (UINTN) (*HostAddress), Length);
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Frees memory that was allocated with AllocateBuffer().
+
+  Space is reclaimed only when HostAddress is the most recent allocation
+  (the DMA buffer top is a simple bump allocator); out-of-order frees
+  leave the space unused.
+
+  @param [in]  This                 The PPI instance pointer.
+  @param [in]  Pages                The number of pages to free.
+  @param [in]  HostAddress          The base system memory address of the allocated range.
+
+  @retval EFI_SUCCESS            The requested memory pages were freed.
+  @retval EFI_INVALID_PARAMETER  The memory range specified by HostAddress and Pages
+                                 was not allocated with AllocateBuffer().
+  @retval EFI_NOT_AVAILABLE_YET  DMA protection has been enabled, but DMA buffer are
+                                 not available to be allocated yet.
+**/
+EFI_STATUS
+EFIAPI
+PeiIoMmuFreeBuffer (
+  IN EDKII_IOMMU_PPI *This,
+  IN UINTN Pages,
+  IN VOID *HostAddress
+  )
+{
+  UINTN Length;
+  VOID *Hob;
+  DMA_BUFFER_INFO *DmaBufferInfo;
+
+  Hob = GetFirstGuidHob (&mDmaBufferInfoGuid);
+  DmaBufferInfo = GET_GUID_HOB_DATA (Hob);
+
+  DEBUG ((DEBUG_INFO, "PeiIoMmuFreeBuffer - page - %x, HostAddr - %x\n", Pages, HostAddress));
+  DEBUG ((DEBUG_INFO, "  DmaBufferCurrentTop - %x\n", DmaBufferInfo->DmaBufferCurrentTop));
+  DEBUG ((DEBUG_INFO, "  DmaBufferCurrentBottom - %x\n", DmaBufferInfo->DmaBufferCurrentBottom));
+
+  if (DmaBufferInfo->DmaBufferCurrentTop == 0) {
+    return EFI_NOT_AVAILABLE_YET;
+  }
+
+  //
+  // LIFO reclaim: only the allocation currently at the top can be returned.
+  //
+  Length = EFI_PAGES_TO_SIZE (Pages);
+  if ((UINTN)HostAddress == DmaBufferInfo->DmaBufferCurrentTop) {
+    DmaBufferInfo->DmaBufferCurrentTop += Length;
+  }
+
+  return EFI_SUCCESS;
+}
+
+//
+// IOMMU PPI instance published (or re-installed) by this driver.
+//
+EDKII_IOMMU_PPI mIoMmuPpi = {
+  EDKII_IOMMU_PPI_REVISION,
+  PeiIoMmuSetAttribute,
+  PeiIoMmuMap,
+  PeiIoMmuUnmap,
+  PeiIoMmuAllocateBuffer,
+  PeiIoMmuFreeBuffer,
+};
+
+//
+// Descriptor used to (re)install mIoMmuPpi.
+//
+CONST EFI_PEI_PPI_DESCRIPTOR mIoMmuPpiList = {
+  EFI_PEI_PPI_DESCRIPTOR_PPI | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST,
+  &gEdkiiIoMmuPpiGuid,
+  (VOID *) &mIoMmuPpi
+};
+
+/**
+  Get ACPI DMAR Table from EdkiiVTdInfo PPI
+
+  On success the DRHD entries of the table are also dumped to the debug log.
+
+  @retval Address              ACPI DMAR Table address
+  @retval NULL                 Failed to get ACPI DMAR Table
+**/
+EFI_ACPI_DMAR_HEADER * GetAcpiDmarTable (
+  VOID
+  )
+{
+  EFI_STATUS Status;
+  EFI_ACPI_DMAR_HEADER *AcpiDmarTable;
+
+  //
+  // Get the DMAR table
+  //
+  Status = PeiServicesLocatePpi (
+             &gEdkiiVTdInfoPpiGuid,
+             0,
+             NULL,
+             (VOID **)&AcpiDmarTable
+             );
+  if (EFI_ERROR (Status)) {
+    DEBUG ((DEBUG_ERROR, "Fail to get ACPI DMAR Table : %r\n", Status));
+    AcpiDmarTable = NULL;
+  } else {
+    VtdLibDumpAcpiDmarDrhd (NULL, NULL, AcpiDmarTable);
+  }
+
+  return AcpiDmarTable;
+}
+
+/**
+  Get the VTd engine context information hob.
+
+  Creates and zero-fills the hob on first use; on subsequent calls the
+  existing hob data is returned.
+
+  @retval The VTd engine context information.
+
+**/
+VTD_INFO *
+GetVTdInfoHob (
+  VOID
+  )
+{
+  VOID                  *GuidHob;
+  VTD_INFO              *VTdInfo;
+
+  GuidHob = GetFirstGuidHob (&mVTdInfoGuid);
+  if (GuidHob != NULL) {
+    //
+    // Hob already exists; return its payload.
+    //
+    return GET_GUID_HOB_DATA (GuidHob);
+  }
+
+  //
+  // First use: create the hob and start from a zeroed context.
+  //
+  VTdInfo = BuildGuidHob (&mVTdInfoGuid, sizeof (VTD_INFO));
+  if (VTdInfo != NULL) {
+    ZeroMem (VTdInfo, sizeof (VTD_INFO));
+  }
+
+  return VTdInfo;
+}
+
+/**
+  Callback function of parse DMAR DRHD table in pre-memory phase.
+
+  Puts the VTd engine into block-all-DMA mode; no translation tables can
+  be built before permanent memory is available.
+
+  NOTE(review): "Dhrd" in the function name looks like a typo for "Drhd";
+  kept as-is because the name is referenced elsewhere.
+
+  @param [in] [out] Context          Callback function context.
+  @param [in]       VTdIndex         The VTd engine index.
+  @param [in]       DmarDrhd         The DRHD table.
+
+**/
+VOID
+ProcessDhrdPreMemory (
+  IN OUT VOID *Context,
+  IN UINTN VTdIndex,
+  IN EFI_ACPI_DMAR_DRHD_HEADER *DmarDrhd
+  )
+{
+  DEBUG ((DEBUG_INFO,"VTD (%d) BaseAddress -  0x%016lx\n", VTdIndex, DmarDrhd->RegisterBaseAddress));
+
+  EnableVTdTranslationProtectionBlockDma ((UINTN) DmarDrhd->RegisterBaseAddress);
+}
+
+/**
+  Callback function of parse DMAR DRHD table in post memory phase.
+
+  Records the DRHD engine into the first free slot of the VTD_UNIT_INFO
+  table unless an entry with the same register base already exists.
+
+  @param [in] [out] Context          Callback function context (VTD_UNIT_INFO table).
+  @param [in]       VTdIndex         The VTd engine index.
+  @param [in]       DmarDrhd         The DRHD table.
+
+**/
+VOID
+ProcessDrhdPostMemory (
+  IN OUT VOID *Context,
+  IN UINTN VTdIndex,
+  IN EFI_ACPI_DMAR_DRHD_HEADER *DmarDrhd
+  )
+{
+  VTD_UNIT_INFO *VtdUnitInfo;
+  UINTN Index;
+
+  VtdUnitInfo = (VTD_UNIT_INFO *) Context;
+
+  if (DmarDrhd->RegisterBaseAddress == 0) {
+    DEBUG ((DEBUG_INFO,"VTd Base Address is 0\n"));
+    ASSERT (FALSE);
+    return;
+  }
+
+  //
+  // Skip engines that were already recorded by an earlier parse.
+  //
+  for (Index = 0; Index < VTD_UNIT_MAX; Index++) {
+    if (VtdUnitInfo[Index].VtdUnitBaseAddress == DmarDrhd->RegisterBaseAddress) {
+      //
+      // RegisterBaseAddress is a UINT64; "%08x" would misalign the DEBUG
+      // varargs on 32-bit builds and truncate the printed address, so use
+      // "%016lx" as the other DEBUG statements here already do.
+      //
+      DEBUG ((DEBUG_INFO,"Find VTD (%d) [0x%016lx] Exist\n", VTdIndex, DmarDrhd->RegisterBaseAddress));
+      return;
+    }
+  }
+
+  //
+  // Record the engine in the first free slot.
+  //
+  for (VTdIndex = 0; VTdIndex < VTD_UNIT_MAX; VTdIndex++) {
+    if (VtdUnitInfo[VTdIndex].VtdUnitBaseAddress == 0) {
+      VtdUnitInfo[VTdIndex].VtdUnitBaseAddress = (UINTN) DmarDrhd->RegisterBaseAddress;
+      VtdUnitInfo[VTdIndex].Segment = DmarDrhd->SegmentNumber;
+      VtdUnitInfo[VTdIndex].Flags = DmarDrhd->Flags;
+      VtdUnitInfo[VTdIndex].Done = FALSE;
+
+      DEBUG ((DEBUG_INFO,"VTD (%d) BaseAddress -  0x%016lx\n", VTdIndex, DmarDrhd->RegisterBaseAddress));
+      DEBUG ((DEBUG_INFO,"  Segment - %d, Flags - 0x%x\n", DmarDrhd->SegmentNumber, DmarDrhd->Flags));
+      return;
+    }
+  }
+
+  DEBUG ((DEBUG_INFO,"VtdUnitInfo Table is full\n"));
+  ASSERT (FALSE);
+  return;
+}
+
+/**
+  Initializes the Intel VTd Info in post memory phase.
+
+  Allocates the VTd Unit Info table on first use, re-parses the DMAR ACPI
+  table into it, and refreshes the engine count and table pointer.
+
+  @retval EFI_SUCCESS           The VTd Info is successfully initialized.
+  @retval EFI_OUT_OF_RESOURCES  Can't allocate the VTd Unit Info table.
+  @retval EFI_UNSUPPORTED       Host address width mismatch, or no DRHD found.
+**/
+EFI_STATUS
+InitVTdInfo (
+  VOID
+  )
+{
+  VTD_INFO *VTdInfo;
+  EFI_ACPI_DMAR_HEADER *AcpiDmarTable;
+  UINTN VtdUnitNumber;
+  VTD_UNIT_INFO *VtdUnitInfo;
+
+  VTdInfo = GetVTdInfoHob ();
+  ASSERT (VTdInfo != NULL);
+
+  AcpiDmarTable = GetAcpiDmarTable ();
+  ASSERT (AcpiDmarTable != NULL);
+
+  if (VTdInfo->VtdUnitInfo == NULL) {
+    //
+    // Generate a new Vtd Unit Info Table
+    //
+    VTdInfo->VtdUnitInfo = AllocateZeroPages (EFI_SIZE_TO_PAGES (sizeof (VTD_UNIT_INFO) * VTD_UNIT_MAX));
+    if (VTdInfo->VtdUnitInfo == NULL) {
+      DEBUG ((DEBUG_ERROR, "InitVTdInfo - OUT_OF_RESOURCE\n"));
+      ASSERT (FALSE);
+      return EFI_OUT_OF_RESOURCES;
+    }
+  }
+  VtdUnitInfo = VTdInfo->VtdUnitInfo;
+
+  if (VTdInfo->HostAddressWidth == 0) {
+    VTdInfo->HostAddressWidth = AcpiDmarTable->HostAddressWidth;
+  }
+
+  if (VTdInfo->HostAddressWidth != AcpiDmarTable->HostAddressWidth) {
+    DEBUG ((DEBUG_ERROR, "Host Address Width is not match.\n"));
+    ASSERT (FALSE);
+    return EFI_UNSUPPORTED;
+  }
+
+  //
+  // Parse the DMAR ACPI Table to the new Vtd Unit Info Table
+  //
+  VtdUnitNumber = ParseDmarAcpiTableDrhd (AcpiDmarTable, ProcessDrhdPostMemory, VtdUnitInfo);
+  if (VtdUnitNumber == 0) {
+    return EFI_UNSUPPORTED;
+  }
+
+  //
+  // Recount the engines: the table may also contain entries recorded by
+  // earlier parses, so the count is taken from the table, not VtdUnitNumber.
+  //
+  for (VTdInfo->VTdEngineCount = 0; VTdInfo->VTdEngineCount < VTD_UNIT_MAX; VTdInfo->VTdEngineCount++) {
+    if (VtdUnitInfo[VTdInfo->VTdEngineCount].VtdUnitBaseAddress == 0) {
+      break;
+    }
+  }
+
+  VTdInfo->AcpiDmarTable = AcpiDmarTable;
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Initializes the Intel VTd DMAR for block all DMA.
+
+  @retval EFI_SUCCESS           DMA is blocked on all DRHD engines.
+  @retval EFI_UNSUPPORTED       The ACPI DMAR table is missing or contains no DRHD.
+**/
+EFI_STATUS
+InitVTdDmarBlockAll (
+  VOID
+  )
+{
+  EFI_ACPI_DMAR_HEADER *AcpiDmarTable;
+  UINTN VtdUnitNumber;
+
+  //
+  // Get the DMAR table
+  //
+  AcpiDmarTable = GetAcpiDmarTable ();
+  ASSERT (AcpiDmarTable != NULL);
+  if (AcpiDmarTable == NULL) {
+    return EFI_UNSUPPORTED;
+  }
+
+  //
+  // Parse the DMAR table and block all DMA.
+  //
+  // ParseDmarAcpiTableDrhd() returns the DRHD engine count (a UINTN), which
+  // must not be returned directly as an EFI_STATUS: any nonzero count would
+  // read as an error code to callers.
+  //
+  VtdUnitNumber = ParseDmarAcpiTableDrhd (AcpiDmarTable, ProcessDhrdPreMemory, NULL);
+  if (VtdUnitNumber == 0) {
+    return EFI_UNSUPPORTED;
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Initializes DMA buffer
+
+  Determines the DMA buffer base either from the VTd PMR hob (reserved by
+  memory-init code) or by allocating aligned pages, then initializes the
+  top/bottom bump-allocator cursors.
+
+  @retval EFI_SUCCESS           DMA buffer is successfully initialized.
+  @retval EFI_INVALID_PARAMETER Invalid DMA buffer size.
+  @retval EFI_OUT_OF_RESOURCES  Can't initialize DMA buffer.
+**/
+EFI_STATUS
+InitDmaBuffer(
+  VOID
+  )
+{
+  DMA_BUFFER_INFO *DmaBufferInfo;
+  VOID *Hob;
+  VOID *VtdPmrHobPtr;
+  VTD_PMR_INFO_HOB *VtdPmrHob;
+
+  DEBUG ((DEBUG_INFO, "InitDmaBuffer :\n"));
+
+  Hob = GetFirstGuidHob (&mDmaBufferInfoGuid);
+  ASSERT(Hob != NULL);
+  DmaBufferInfo = GET_GUID_HOB_DATA (Hob);
+
+  /**
+    When gVtdPmrInfoDataHobGuid exists, it means:
+      1. Dma buffer is reserved by memory initialize code
+      2. PeiGetVtdPmrAlignmentLib is used to get alignment
+      3. Protection regions are determined by the system memory map
+      4. Protection regions will be conveyed through VTD_PMR_INFO_HOB
+
+    When gVtdPmrInfoDataHobGuid doesn't exist, it means:
+      1. IntelVTdDmarPei driver will calculate the protected memory alignment
+      2. Dma buffer is reserved by AllocateAlignedPages()
+  **/
+
+
+  if (DmaBufferInfo->DmaBufferSize == 0) {
+    DEBUG ((DEBUG_INFO, " DmaBufferSize is 0\n"));
+    return EFI_INVALID_PARAMETER;
+  }
+
+  if (DmaBufferInfo->DmaBufferBase == 0) {
+    VtdPmrHobPtr = GetFirstGuidHob (&gVtdPmrInfoDataHobGuid);
+    if (VtdPmrHobPtr != NULL) {
+      //
+      // Get the protected memory ranges information from the VTd PMR hob
+      //
+      VtdPmrHob = GET_GUID_HOB_DATA (VtdPmrHobPtr);
+
+      //
+      // The gap between the low-range limit and high-range base is the
+      // unprotected window reserved for the DMA buffer.
+      //
+      if ((VtdPmrHob->ProtectedHighBase - VtdPmrHob->ProtectedLowLimit) < DmaBufferInfo->DmaBufferSize) {
+        DEBUG ((DEBUG_ERROR, " DmaBufferSize not enough\n"));
+        return EFI_INVALID_PARAMETER;
+      }
+      DmaBufferInfo->DmaBufferBase = VtdPmrHob->ProtectedLowLimit;
+    } else {
+      //
+      // Allocate memory for DMA buffer
+      //
+      DmaBufferInfo->DmaBufferBase = (UINTN) AllocateAlignedPages (EFI_SIZE_TO_PAGES (DmaBufferInfo->DmaBufferSize), 0);
+      if (DmaBufferInfo->DmaBufferBase == 0) {
+        DEBUG ((DEBUG_ERROR, " InitDmaBuffer : OutOfResource\n"));
+        return EFI_OUT_OF_RESOURCES;
+      }
+      DEBUG ((DEBUG_INFO, "Alloc DMA buffer success.\n"));
+    }
+
+    DmaBufferInfo->DmaBufferCurrentTop = DmaBufferInfo->DmaBufferBase + DmaBufferInfo->DmaBufferSize;
+    DmaBufferInfo->DmaBufferCurrentBottom = DmaBufferInfo->DmaBufferBase;
+
+    DEBUG ((DEBUG_INFO, " DmaBufferSize          : 0x%x\n", DmaBufferInfo->DmaBufferSize));
+    DEBUG ((DEBUG_INFO, " DmaBufferBase          : 0x%x\n", DmaBufferInfo->DmaBufferBase));
+  }
+
+  DEBUG ((DEBUG_INFO, " DmaBufferCurrentTop    : 0x%x\n", DmaBufferInfo->DmaBufferCurrentTop));
+  DEBUG ((DEBUG_INFO, " DmaBufferCurrentBottom : 0x%x\n", DmaBufferInfo->DmaBufferCurrentBottom));
+
+  VTdLogAddEvent (VTDLOG_PEI_PROTECT_MEMORY_RANGE, DmaBufferInfo->DmaBufferCurrentBottom, DmaBufferInfo->DmaBufferCurrentTop);
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Initializes the Intel VTd DMAR for DMA buffer.
+
+  Prepares the VTd configuration, builds the translation tables, enables
+  DMAR translation protection, and (re)installs the IOMMU PPI.
+
+  @retval EFI_SUCCESS           VTd DMA protection is successfully enabled.
+  @retval EFI_OUT_OF_RESOURCES  Can't initialize the driver.
+  @retval EFI_DEVICE_ERROR      DMAR translation is not enabled.
+**/
+EFI_STATUS
+InitVTdDmarForDma (
+  VOID
+  )
+{
+  VTD_INFO *VTdInfo;
+
+  EFI_STATUS Status;
+  EFI_PEI_PPI_DESCRIPTOR *OldDescriptor;
+  EDKII_IOMMU_PPI *OldIoMmuPpi;
+
+  VTdInfo = GetVTdInfoHob ();
+  ASSERT (VTdInfo != NULL);
+
+  DEBUG ((DEBUG_INFO, "PrepareVtdConfig\n"));
+  Status = PrepareVtdConfig (VTdInfo);
+  if (EFI_ERROR (Status)) {
+    ASSERT_EFI_ERROR (Status);
+    return Status;
+  }
+
+  // create root entry table
+  DEBUG ((DEBUG_INFO, "SetupTranslationTable\n"));
+  Status = SetupTranslationTable (VTdInfo);
+  if (EFI_ERROR (Status)) {
+    ASSERT_EFI_ERROR (Status);
+    return Status;
+  }
+
+  DEBUG ((DEBUG_INFO, "EnableVtdDmar\n"));
+  Status = EnableVTdTranslationProtection(VTdInfo);
+  if (EFI_ERROR (Status)) {
+    return Status;
+  }
+
+  DEBUG ((DEBUG_INFO, "Install gEdkiiIoMmuPpiGuid\n"));
+  // install protocol
+  //
+  // (Re)Install PPI: a pre-memory instance may already be published,
+  // in which case it must be replaced rather than installed twice.
+  //
+  Status = PeiServicesLocatePpi (
+             &gEdkiiIoMmuPpiGuid,
+             0,
+             &OldDescriptor,
+             (VOID **) &OldIoMmuPpi
+             );
+  if (!EFI_ERROR (Status)) {
+    Status = PeiServicesReInstallPpi (OldDescriptor, &mIoMmuPpiList);
+  } else {
+    Status = PeiServicesInstallPpi (&mIoMmuPpiList);
+  }
+  ASSERT_EFI_ERROR (Status);
+
+  return Status;
+}
+
+/**
+  This function handles S3 resume task at the end of PEI
+
+  @param[in] PeiServices    Pointer to PEI Services Table.
+  @param[in] NotifyDesc     Pointer to the descriptor for the Notification event that
+                            caused this function to execute.
+  @param[in] Ppi            Pointer to the PPI data associated with this function.
+
+  @retval EFI_STATUS        Always return EFI_SUCCESS
+**/
+EFI_STATUS
+EFIAPI
+S3EndOfPeiNotify(
+  IN EFI_PEI_SERVICES          **PeiServices,
+  IN EFI_PEI_NOTIFY_DESCRIPTOR *NotifyDesc,
+  IN VOID                      *Ppi
+  )
+{
+  DEBUG ((DEBUG_INFO, "VTd DMAR PEI S3EndOfPeiNotify\n"));
+
+  //
+  // NOTE(review): BIT1 of PcdVTdPolicyPropertyMask presumably means "keep
+  // VTd protection enabled across EndOfPei" — confirm against the Pcd
+  // definition in IntelSiliconPkg.dec. When it is clear, protection is
+  // torn down before handing off to the S3 resume path.
+  //
+  if ((PcdGet8 (PcdVTdPolicyPropertyMask) & BIT1) == 0) {
+    DumpVtdIfError ();
+
+    DisableVTdTranslationProtection (GetVTdInfoHob ());
+  }
+  return EFI_SUCCESS;
+}
+
+//
+// Notify descriptor registered only for S3 resume (see IntelVTdDmarInitialize).
+//
+EFI_PEI_NOTIFY_DESCRIPTOR mS3EndOfPeiNotifyDesc = {
+  (EFI_PEI_PPI_DESCRIPTOR_NOTIFY_CALLBACK | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST),
+  &gEfiEndOfPeiSignalPpiGuid,
+  S3EndOfPeiNotify
+};
+
+/**
+  This function handles VTd engine setup
+
+  Pre-memory: blocks all DMA and installs the IOMMU PPI.
+  Post-memory: sets up the DMA buffer and translation tables, then
+  re-installs the IOMMU PPI. May be invoked in both phases.
+
+  @param[in] PeiServices    Pointer to PEI Services Table.
+  @param[in] NotifyDesc     Pointer to the descriptor for the Notification event that
+                            caused this function to execute.
+  @param[in] Ppi            Pointer to the PPI data associated with this function.
+
+  @retval EFI_STATUS        Always return EFI_SUCCESS
+**/
+EFI_STATUS
+EFIAPI
+VTdInfoNotify (
+  IN EFI_PEI_SERVICES          **PeiServices,
+  IN EFI_PEI_NOTIFY_DESCRIPTOR *NotifyDesc,
+  IN VOID                      *Ppi
+  )
+{
+  EFI_STATUS Status;
+  VOID *MemoryDiscovered;
+  BOOLEAN MemoryInitialized;
+
+  DEBUG ((DEBUG_INFO, "VTdInfoNotify\n"));
+
+  //
+  // Check if memory is initialized.
+  //
+  MemoryInitialized = FALSE;
+  Status = PeiServicesLocatePpi (
+             &gEfiPeiMemoryDiscoveredPpiGuid,
+             0,
+             NULL,
+             &MemoryDiscovered
+             );
+  if (!EFI_ERROR(Status)) {
+    MemoryInitialized = TRUE;
+  }
+
+  DEBUG ((DEBUG_INFO, "MemoryInitialized - %x\n", MemoryInitialized));
+
+  if (!MemoryInitialized) {
+    //
+    // If the memory is not initialized,
+    // Protect all system memory
+    //
+
+    InitVTdDmarBlockAll ();
+
+    //
+    // Install PPI.
+    //
+    Status = PeiServicesInstallPpi (&mIoMmuPpiList);
+    ASSERT_EFI_ERROR(Status);
+  } else {
+    //
+    // If the memory is initialized,
+    // Allocate DMA buffer and protect rest system memory
+    //
+
+    VTdLogInitialize (TRUE);
+
+    Status = InitDmaBuffer ();
+    ASSERT_EFI_ERROR (Status);
+
+    //
+    // NOTE: We need reinit VTdInfo because previous information might be overridden.
+    //
+    Status = InitVTdInfo ();
+    ASSERT_EFI_ERROR (Status);
+
+    Status = InitVTdDmarForDma ();
+    ASSERT_EFI_ERROR (Status);
+  }
+
+  return EFI_SUCCESS;
+}
+
+//
+// Runs VTdInfoNotify whenever the EdkiiVTdInfo PPI is installed.
+//
+EFI_PEI_NOTIFY_DESCRIPTOR mVTdInfoNotifyDesc = {
+  (EFI_PEI_PPI_DESCRIPTOR_NOTIFY_CALLBACK | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST),
+  &gEdkiiVTdInfoPpiGuid,
+  VTdInfoNotify
+};
+
+/**
+  Initializes the Intel VTd DMAR PEIM.
+
+  Entry point: checks the feature gate, initializes logging, creates the
+  DMA buffer info hob, and registers the VTd info (and, on S3, EndOfPei)
+  notifications.
+
+  @param[in] FileHandle     Handle of the file being invoked.
+  @param[in] PeiServices    Describes the list of possible PEI Services.
+
+  @retval EFI_SUCCESS           The driver is successfully initialized.
+  @retval EFI_UNSUPPORTED       The feature is disabled by PcdVTdPolicyPropertyMask.
+  @retval EFI_OUT_OF_RESOURCES  Can't initialize the driver.
+**/
+EFI_STATUS
+EFIAPI
+IntelVTdDmarInitialize (
+  IN EFI_PEI_FILE_HANDLE       FileHandle,
+  IN CONST EFI_PEI_SERVICES    **PeiServices
+  )
+{
+  EFI_STATUS Status;
+  EFI_BOOT_MODE BootMode;
+  DMA_BUFFER_INFO *DmaBufferInfo;
+
+  DEBUG ((DEBUG_INFO, "IntelVTdDmarInitialize\n"));
+
+  //
+  // BIT0 of PcdVTdPolicyPropertyMask gates the whole pre-boot DMA
+  // protection feature.
+  //
+  if ((PcdGet8(PcdVTdPolicyPropertyMask) & BIT0) == 0) {
+    return EFI_UNSUPPORTED;
+  }
+
+  VTdLogInitialize (FALSE);
+
+  DmaBufferInfo = BuildGuidHob (&mDmaBufferInfoGuid, sizeof (DMA_BUFFER_INFO));
+  ASSERT(DmaBufferInfo != NULL);
+  if (DmaBufferInfo == NULL) {
+    return EFI_OUT_OF_RESOURCES;
+  }
+  ZeroMem (DmaBufferInfo, sizeof (DMA_BUFFER_INFO));
+
+  PeiServicesGetBootMode (&BootMode);
+
+  if (BootMode == BOOT_ON_S3_RESUME) {
+    DmaBufferInfo->DmaBufferSize = PcdGet32 (PcdVTdPeiDmaBufferSizeS3);
+  } else {
+    DmaBufferInfo->DmaBufferSize = PcdGet32 (PcdVTdPeiDmaBufferSize);
+  }
+
+  Status = PeiServicesNotifyPpi (&mVTdInfoNotifyDesc);
+  ASSERT_EFI_ERROR (Status);
+
+  //
+  // Register EndOfPei Notify for S3
+  //
+  if (BootMode == BOOT_ON_S3_RESUME) {
+    Status = PeiServicesNotifyPpi (&mS3EndOfPeiNotifyDesc);
+    ASSERT_EFI_ERROR (Status);
+  }
+
+  return EFI_SUCCESS;
+}
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.h b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.h
new file mode 100644
index 000000000..1ddf8fbf7
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.h
@@ -0,0 +1,272 @@
+/** @file
+ The definition for DMA access Library.
+
+ Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef __DMA_ACCESS_LIB_H__
+#define __DMA_ACCESS_LIB_H__
+
+#include <Library/IntelVTdPeiDxeLib.h>
+
+#define VTD_64BITS_ADDRESS(Lo, Hi) (LShiftU64 (Lo, 12) | LShiftU64 (Hi, 32))
+
+//
+// Use 256-bit descriptor
+// Queue size is 128.
+//
+#define VTD_QUEUED_INVALIDATION_DESCRIPTOR_WIDTH 1
+#define VTD_INVALIDATION_QUEUE_SIZE 0
+
+//
+// Per-engine (DRHD) context tracked by this driver.
+//
+typedef struct {
+  BOOLEAN Done;                          // TRUE once DMA protection has been enabled on this engine
+  UINTN VtdUnitBaseAddress;              // MMIO base address of the VTd register set
+  UINT16 Segment;                        // PCI segment number served by this engine
+  UINT8 Flags;                           // DRHD flags from the DMAR table -- presumably INCLUDE_PCI_ALL; confirm
+  VTD_VER_REG VerReg;                    // cached VER_REG
+  VTD_CAP_REG CapReg;                    // cached CAP_REG
+  VTD_ECAP_REG ECapReg;                  // cached ECAP_REG
+  BOOLEAN Is5LevelPaging;                // second-level translation uses 5-level paging
+  UINT8 EnableQueuedInvalidation;        // non-zero: queued invalidation in use, else register-based
+  VOID *QiDescBuffer;                    // invalidation queue buffer (page allocation)
+  UINTN QiDescBufferSize;                // size of QiDescBuffer in bytes
+  UINTN FixedSecondLevelPagingEntry;     // NOTE(review): purpose not visible in this chunk -- confirm
+  UINTN RootEntryTable;                  // root entry table address (legacy format)
+  UINTN ExtRootEntryTable;               // extended root entry table address
+  UINTN RootEntryTablePageSize;          // page count backing RootEntryTable -- confirm
+  UINTN ExtRootEntryTablePageSize;       // page count backing ExtRootEntryTable -- confirm
+} VTD_UNIT_INFO;
+
+//
+// Aggregate VTd context: parsed DMAR data plus one VTD_UNIT_INFO per engine.
+//
+typedef struct {
+  EFI_ACPI_DMAR_HEADER *AcpiDmarTable;   // the platform DMAR ACPI table
+  UINT8 HostAddressWidth;                // host address width reported by DMAR
+  VTD_REGESTER_THIN_INFO *RegsInfoBuffer; // scratch buffer for register dumps/logging
+  UINTN VTdEngineCount;                  // number of entries in VtdUnitInfo
+  VTD_UNIT_INFO *VtdUnitInfo;            // per-engine context array
+} VTD_INFO;
+
+//
+// Bookkeeping for the PEI DMA buffer used for IOMMU mappings.
+// Top/Bottom are allocation cursors within [Base, Base + Size) -- confirm direction with the allocator.
+//
+typedef struct {
+  UINTN DmaBufferBase;
+  UINTN DmaBufferSize;
+  UINTN DmaBufferCurrentTop;
+  UINTN DmaBufferCurrentBottom;
+} DMA_BUFFER_INFO;
+
+typedef
+VOID
+(*PROCESS_DRHD_CALLBACK_FUNC) (
+ IN OUT VOID *Context,
+ IN UINTN VTdIndex,
+ IN EFI_ACPI_DMAR_DRHD_HEADER *DmarDrhd
+ );
+
+/**
+ Enable VTd translation table protection for block DMA
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+
+ @retval EFI_SUCCESS DMAR translation is enabled.
+ @retval EFI_DEVICE_ERROR DMAR translation is not enabled.
+**/
+EFI_STATUS
+EnableVTdTranslationProtectionBlockDma (
+ IN UINTN VtdUnitBaseAddress
+ );
+
+/**
+ Enable VTd translation table protection.
+
+ @param[in] VTdInfo The VTd engine context information.
+
+ @retval EFI_SUCCESS DMAR translation is enabled.
+ @retval EFI_DEVICE_ERROR DMAR translation is not enabled.
+**/
+EFI_STATUS
+EnableVTdTranslationProtection (
+ IN VTD_INFO *VTdInfo
+ );
+
+/**
+ Disable VTd translation table protection.
+
+ @param[in] VTdInfo The VTd engine context information.
+**/
+VOID
+DisableVTdTranslationProtection (
+ IN VTD_INFO *VTdInfo
+ );
+
+/**
+ Parse DMAR DRHD table.
+
+ @param[in] AcpiDmarTable DMAR ACPI table
+ @param[in] Callback Callback function for handle DRHD
+ @param[in] Context Callback function Context
+
+ @return the VTd engine number.
+
+**/
+UINTN
+ParseDmarAcpiTableDrhd (
+ IN EFI_ACPI_DMAR_HEADER *AcpiDmarTable,
+ IN PROCESS_DRHD_CALLBACK_FUNC Callback,
+ IN VOID *Context
+ );
+
+/**
+ Dump DMAR ACPI table.
+
+ @param[in] Dmar DMAR ACPI table
+**/
+VOID
+DumpAcpiDMAR (
+ IN EFI_ACPI_DMAR_HEADER *Dmar
+ );
+
+/**
+ Prepare VTD configuration.
+
+ @param[in] VTdInfo The VTd engine context information.
+
+ @retval EFI_SUCCESS Prepare Vtd config success
+**/
+EFI_STATUS
+PrepareVtdConfig (
+ IN VTD_INFO *VTdInfo
+ );
+
+/**
+ Setup VTd translation table.
+
+ @param[in] VTdInfo The VTd engine context information.
+
+ @retval EFI_SUCCESS Setup translation table successfully.
+ @retval EFI_OUT_OF_RESOURCE Setup translation table fail.
+**/
+EFI_STATUS
+SetupTranslationTable (
+ IN VTD_INFO *VTdInfo
+ );
+
+/**
+ Flush VTD page table and context table memory.
+
+ This action is to make sure the IOMMU engine can get final data in memory.
+
+ @param[in] VtdUnitInfo The VTd engine unit information.
+ @param[in] Base The base address of memory to be flushed.
+ @param[in] Size The size of memory in bytes to be flushed.
+**/
+VOID
+FlushPageTableMemory (
+ IN VTD_UNIT_INFO *VtdUnitInfo,
+ IN UINTN Base,
+ IN UINTN Size
+ );
+
+/**
+ Allocate zero pages.
+
+ @param[in] Pages the number of pages.
+
+ @return the page address.
+ @retval NULL No resource to allocate pages.
+**/
+VOID *
+EFIAPI
+AllocateZeroPages (
+ IN UINTN Pages
+ );
+
+/**
+ Return the index of PCI data.
+
+ @param[in] VtdUnitInfo The VTd engine unit information.
+ @param[in] Segment The Segment used to identify a VTd engine.
+ @param[in] SourceId The SourceId used to identify a VTd engine and table entry.
+
+ @return The index of the PCI data.
+ @retval (UINTN)-1 The PCI data is not found.
+**/
+UINTN
+GetPciDataIndex (
+ IN VTD_UNIT_INFO *VtdUnitInfo,
+ IN UINT16 Segment,
+ IN VTD_SOURCE_ID SourceId
+ );
+
+/**
+ Get the VTd engine context information hob.
+
+ @retval The VTd engine context information.
+
+**/
+VTD_INFO *
+GetVTdInfoHob (
+ VOID
+ );
+
+/**
+ Dump VTd registers if there is error.
+**/
+VOID
+DumpVtdIfError (
+ VOID
+ );
+
+/**
+ Add the VTd log event in post memory phase.
+
+ @param[in] EventType Event type
+ @param[in] Data1 First parameter
+ @param[in] Data2 Second parameter
+
+**/
+VOID
+EFIAPI
+VTdLogAddEvent (
+ IN CONST VTDLOG_EVENT_TYPE EventType,
+ IN CONST UINT64 Data1,
+ IN CONST UINT64 Data2
+ );
+
+/**
+ Add a new VTd log event with data.
+
+ @param[in] EventType Event type
+ @param[in] Param parameter
+ @param[in] Data Data
+ @param[in] DataSize Data size
+
+**/
+VOID
+EFIAPI
+VTdLogAddDataEvent (
+ IN CONST VTDLOG_EVENT_TYPE EventType,
+ IN CONST UINT64 Param,
+ IN CONST VOID *Data,
+ IN CONST UINT32 DataSize
+ );
+
+/**
+ Add the VTd log event in pre-memory phase.
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+ @param[in] Mode Pre-memory DMA protection mode.
+ @param[in] Status Status
+
+**/
+VOID
+EFIAPI
+VTdLogAddPreMemoryEvent (
+ IN UINTN VtdUnitBaseAddress,
+ IN UINT8 Mode,
+ IN UINT8 Status
+ );
+
+extern EFI_GUID mVTdInfoGuid;
+extern EFI_GUID mDmaBufferInfoGuid;
+
+#endif
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.inf b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.inf
new file mode 100644
index 000000000..f756c543c
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.inf
@@ -0,0 +1,70 @@
+## @file
+# Component INF file for the Intel VTd DMAR PEIM.
+#
+# This driver initializes VTd engine based upon EDKII_VTD_INFO_PPI
+# and provide DMA protection in PEI.
+#
+# Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+##
+
+[Defines]
+ INF_VERSION = 0x00010017
+ BASE_NAME = IntelVTdCorePei
+ MODULE_UNI_FILE = IntelVTdCorePei.uni
+ FILE_GUID = 9311b0cc-5c08-4c0a-bec8-23afab024e48
+ MODULE_TYPE = PEIM
+ VERSION_STRING = 2.0
+ ENTRY_POINT = IntelVTdDmarInitialize
+
+[Packages]
+ MdePkg/MdePkg.dec
+ MdeModulePkg/MdeModulePkg.dec
+ IntelSiliconPkg/IntelSiliconPkg.dec
+
+[Sources]
+ IntelVTdCorePei.c
+ IntelVTdCorePei.h
+ IntelVTdDmar.c
+ DmarTable.c
+ TranslationTable.c
+
+[LibraryClasses]
+ DebugLib
+ BaseMemoryLib
+ BaseLib
+ PeimEntryPoint
+ PeiServicesLib
+ HobLib
+ IoLib
+ CacheMaintenanceLib
+ PciSegmentLib
+ IntelVTdPeiDxeLib
+
+[Guids]
+ gVTdLogBufferHobGuid ## PRODUCES CONSUMES
+ gVtdPmrInfoDataHobGuid ## CONSUMES
+
+[Ppis]
+ gEdkiiIoMmuPpiGuid ## PRODUCES
+ gEdkiiVTdInfoPpiGuid ## CONSUMES
+ gEfiPeiMemoryDiscoveredPpiGuid ## CONSUMES
+ gEfiEndOfPeiSignalPpiGuid ## CONSUMES
+ gEdkiiVTdNullRootEntryTableGuid ## CONSUMES
+
+[Pcd]
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdPolicyPropertyMask ## CONSUMES
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdPeiDmaBufferSize ## CONSUMES
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdPeiDmaBufferSizeS3 ## CONSUMES
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdSupportAbortDmaMode ## CONSUMES
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdLogLevel ## CONSUMES
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdPeiPostMemLogBufferSize ## CONSUMES
+
+[Depex]
+ gEfiPeiMasterBootModePpiGuid AND
+ gEdkiiVTdInfoPpiGuid
+
+[UserExtensions.TianoCore."ExtraFiles"]
+ IntelVTdCorePeiExtra.uni
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.uni b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.uni
new file mode 100644
index 000000000..2b5b260f5
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.uni
@@ -0,0 +1,14 @@
+// /** @file
+// IntelVTdCorePei Module Localized Abstract and Description Content
+//
+// Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+//
+// **/
+
+
+#string STR_MODULE_ABSTRACT #language en-US "Intel VTd CORE PEI Driver."
+
+#string STR_MODULE_DESCRIPTION #language en-US "This driver initializes VTd engine based upon EDKII_VTD_INFO_PPI and provide DMA protection to device in PEI."
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePeiExtra.uni b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePeiExtra.uni
new file mode 100644
index 000000000..14848f924
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePeiExtra.uni
@@ -0,0 +1,14 @@
+// /** @file
+// IntelVTdCorePei Localized Strings and Content
+//
+// Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+//
+// **/
+
+#string STR_PROPERTIES_MODULE_NAME
+#language en-US
+"Intel VTd CORE PEI Driver"
+
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdDmar.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdDmar.c
new file mode 100644
index 000000000..011a32f2a
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdDmar.c
@@ -0,0 +1,731 @@
+/** @file
+
+ Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <PiPei.h>
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/IoLib.h>
+#include <Library/DebugLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/CacheMaintenanceLib.h>
+#include <Library/PeiServicesLib.h>
+#include <IndustryStandard/Vtd.h>
+#include <Ppi/VtdInfo.h>
+#include <Ppi/VtdNullRootEntryTable.h>
+#include <Ppi/IoMmu.h>
+#include <Guid/VtdLogDataHob.h>
+#include "IntelVTdCorePei.h"
+
+#define VTD_CAP_REG_NFR_MAX (256)
+
+/**
+  Flush VTD page table and context table memory.
+
+  This action is to make sure the IOMMU engine can get final data in memory.
+
+  @param[in] VtdUnitInfo The VTd engine unit information.
+  @param[in] Base The base address of memory to be flushed.
+  @param[in] Size The size of memory in bytes to be flushed.
+**/
+VOID
+FlushPageTableMemory (
+  IN VTD_UNIT_INFO *VtdUnitInfo,
+  IN UINTN Base,
+  IN UINTN Size
+  )
+{
+  //
+  // ECAP.C set means the engine snoops processor caches on page-walk
+  // accesses, so an explicit write-back is only required when C is 0.
+  //
+  if (VtdUnitInfo->ECapReg.Bits.C == 0) {
+    WriteBackDataCacheRange ((VOID *) Base, Size);
+  }
+}
+
+/**
+  Prepare the cache invalidation interface.
+
+  Engines reporting VTd architecture version 5.x or lower use the
+  register-based invalidation interface; newer engines must support and
+  use queued invalidation (QI).
+
+  NOTE(review): "Perpare" typo in the function name is kept because the
+  name is referenced by callers.
+
+  @param[in] VtdUnitInfo The VTd engine unit information.
+
+  @retval EFI_SUCCESS The operation was successful.
+  @retval EFI_UNSUPPORTED Invalidation method is not supported.
+  @retval EFI_OUT_OF_RESOURCES A memory allocation failed.
+**/
+EFI_STATUS
+PerpareCacheInvalidationInterface (
+  IN VTD_UNIT_INFO *VtdUnitInfo
+  )
+{
+  UINT32 Reg32;
+  VTD_ECAP_REG ECapReg;
+  VTD_IQA_REG IqaReg;
+  UINTN VtdUnitBaseAddress;
+
+  VtdUnitBaseAddress = VtdUnitInfo->VtdUnitBaseAddress;
+
+  //
+  // VerReg was cached by PrepareVtdConfig before this call.
+  //
+  if (VtdUnitInfo->VerReg.Bits.Major <= 5) {
+    VtdUnitInfo->EnableQueuedInvalidation = 0;
+    DEBUG ((DEBUG_INFO, "Use Register-based Invalidation Interface for engine [0x%x]\n", VtdUnitBaseAddress));
+    return EFI_SUCCESS;
+  }
+
+  ECapReg.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_ECAP_REG);
+  if (ECapReg.Bits.QI == 0) {
+    DEBUG ((DEBUG_ERROR, "Hardware does not support queued invalidations interface for engine [0x%x]\n", VtdUnitBaseAddress));
+    return EFI_UNSUPPORTED;
+  }
+
+  VtdUnitInfo->EnableQueuedInvalidation = 1;
+  DEBUG ((DEBUG_INFO, "Use Queued Invalidation Interface for engine [0x%x]\n", VtdUnitBaseAddress));
+
+  //
+  // If QI is already active (e.g. from an earlier phase), disable it and
+  // clear IQA before reprogramming the queue.
+  //
+  Reg32 = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  if ((Reg32 & B_GSTS_REG_QIES) != 0) {
+    DEBUG ((DEBUG_INFO,"Queued Invalidation Interface was enabled.\n"));
+    Reg32 &= (~B_GSTS_REG_QIES);
+    MmioWrite32 (VtdUnitBaseAddress + R_GCMD_REG, Reg32);
+    do {
+      Reg32 = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+    } while ((Reg32 & B_GSTS_REG_QIES) != 0);
+    MmioWrite64 (VtdUnitBaseAddress + R_IQA_REG, 0);
+  }
+
+  //
+  // Initialize the Invalidation Queue Tail Register to zero.
+  //
+  MmioWrite64 (VtdUnitBaseAddress + R_IQT_REG, 0);
+
+  //
+  // Setup the IQ address, size and descriptor width through the Invalidation Queue Address Register.
+  // QS=0 with DW=1 gives 128 descriptors of 32 bytes: one 4KB page.
+  //
+  if (VtdUnitInfo->QiDescBuffer == NULL) {
+    VtdUnitInfo->QiDescBufferSize = (sizeof (QI_256_DESC) * ((UINTN) 1 << (VTD_INVALIDATION_QUEUE_SIZE + 7)));
+    VtdUnitInfo->QiDescBuffer = AllocatePages (EFI_SIZE_TO_PAGES (VtdUnitInfo->QiDescBufferSize));
+    if (VtdUnitInfo->QiDescBuffer == NULL) {
+      DEBUG ((DEBUG_ERROR,"Could not Alloc Invalidation Queue Buffer.\n"));
+      VTdLogAddEvent (VTDLOG_PEI_QUEUED_INVALIDATION, VTD_LOG_QI_ERROR_OUT_OF_RESOURCES, VtdUnitBaseAddress);
+      return EFI_OUT_OF_RESOURCES;
+    }
+  }
+
+  DEBUG ((DEBUG_INFO, "Invalidation Queue Buffer Size : %d\n", VtdUnitInfo->QiDescBufferSize));
+  //
+  // 4KB Aligned address
+  //
+  IqaReg.Uint64 = (UINT64) (UINTN) VtdUnitInfo->QiDescBuffer;
+  IqaReg.Bits.DW = VTD_QUEUED_INVALIDATION_DESCRIPTOR_WIDTH;
+  IqaReg.Bits.QS = VTD_INVALIDATION_QUEUE_SIZE;
+  MmioWrite64 (VtdUnitBaseAddress + R_IQA_REG, IqaReg.Uint64);
+  IqaReg.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_IQA_REG);
+  DEBUG ((DEBUG_INFO, "IQA_REG = 0x%lx, IQH_REG = 0x%lx\n", IqaReg.Uint64, MmioRead64 (VtdUnitBaseAddress + R_IQH_REG)));
+
+  //
+  // Enable the queued invalidation interface through the Global Command Register.
+  // When enabled, hardware sets the QIES field in the Global Status Register.
+  //
+  Reg32 = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  Reg32 |= B_GMCD_REG_QIE;
+  MmioWrite32 (VtdUnitBaseAddress + R_GCMD_REG, Reg32);
+  DEBUG ((DEBUG_INFO, "Enable Queued Invalidation Interface. GCMD_REG = 0x%x\n", Reg32));
+  do {
+    Reg32 = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  } while ((Reg32 & B_GSTS_REG_QIES) == 0);
+
+  VTdLogAddEvent (VTDLOG_PEI_QUEUED_INVALIDATION, VTD_LOG_QI_ENABLE, VtdUnitBaseAddress);
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Submit the queued invalidation descriptor to the remapping
+  hardware unit and wait for its completion.
+
+  @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+  @param[in] Desc The invalidate descriptor
+
+  @retval EFI_SUCCESS The operation was successful.
+  @retval RETURN_DEVICE_ERROR A fault is detected.
+  @retval EFI_INVALID_PARAMETER Parameter is invalid.
+**/
+EFI_STATUS
+SubmitQueuedInvalidationDescriptor (
+  IN UINTN VtdUnitBaseAddress,
+  IN QI_256_DESC *Desc
+  )
+{
+  EFI_STATUS Status;
+  VTD_REGESTER_QI_INFO RegisterQi;
+
+  Status = VtdLibSubmitQueuedInvalidationDescriptor (VtdUnitBaseAddress, Desc, FALSE);
+  if (Status == EFI_DEVICE_ERROR) {
+    //
+    // Log the fault status and error record, then clear the QI-related
+    // fault bits so subsequent submissions start from a clean state.
+    //
+    RegisterQi.BaseAddress = VtdUnitBaseAddress;
+    RegisterQi.FstsReg = MmioRead32 (VtdUnitBaseAddress + R_FSTS_REG);
+    RegisterQi.IqercdReg = MmioRead64 (VtdUnitBaseAddress + R_IQERCD_REG);
+    VTdLogAddDataEvent (VTDLOG_PEI_REGISTER, VTDLOG_REGISTER_QI, &RegisterQi, sizeof (VTD_REGESTER_QI_INFO));
+
+    MmioWrite32 (VtdUnitBaseAddress + R_FSTS_REG, RegisterQi.FstsReg & (B_FSTS_REG_IQE | B_FSTS_REG_ITE | B_FSTS_REG_ICE));
+  }
+
+  return Status;
+}
+
+/**
+  Invalidate VTd context cache.
+
+  Uses a global-granularity invalidation, via CCMD for the register-based
+  interface or a context-cache QI descriptor for queued invalidation.
+
+  @param[in] VtdUnitInfo The VTd engine unit information.
+
+  @retval EFI_SUCCESS The invalidation completed.
+  @retval EFI_DEVICE_ERROR A previous invalidation is still pending, or the QI submission faulted.
+**/
+EFI_STATUS
+InvalidateContextCache (
+  IN VTD_UNIT_INFO *VtdUnitInfo
+  )
+{
+  UINT64 Reg64;
+  QI_256_DESC QiDesc;
+
+  if (VtdUnitInfo->EnableQueuedInvalidation == 0) {
+    //
+    // Register-based Invalidation
+    //
+    Reg64 = MmioRead64 (VtdUnitInfo->VtdUnitBaseAddress + R_CCMD_REG);
+    if ((Reg64 & B_CCMD_REG_ICC) != 0) {
+      // ICC still set: a previous context invalidation has not completed.
+      DEBUG ((DEBUG_ERROR,"ERROR: InvalidateContextCache: B_CCMD_REG_ICC is set for VTD(%x)\n", VtdUnitInfo->VtdUnitBaseAddress));
+      return EFI_DEVICE_ERROR;
+    }
+
+    // Request a global context-cache invalidation and poll ICC until done.
+    Reg64 &= ((~B_CCMD_REG_ICC) & (~B_CCMD_REG_CIRG_MASK));
+    Reg64 |= (B_CCMD_REG_ICC | V_CCMD_REG_CIRG_GLOBAL);
+    MmioWrite64 (VtdUnitInfo->VtdUnitBaseAddress + R_CCMD_REG, Reg64);
+
+    do {
+      Reg64 = MmioRead64 (VtdUnitInfo->VtdUnitBaseAddress + R_CCMD_REG);
+    } while ((Reg64 & B_CCMD_REG_ICC) != 0);
+  } else {
+    //
+    // Queued Invalidation
+    //
+    QiDesc.Uint64[0] = QI_CC_FM(0) | QI_CC_SID(0) | QI_CC_DID(0) | QI_CC_GRAN(1) | QI_CC_TYPE;
+    QiDesc.Uint64[1] = 0;
+    QiDesc.Uint64[2] = 0;
+    QiDesc.Uint64[3] = 0;
+
+    return SubmitQueuedInvalidationDescriptor(VtdUnitInfo->VtdUnitBaseAddress, &QiDesc);
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Invalidate VTd IOTLB.
+
+  Uses a global-granularity invalidation, via the IOTLB register (located
+  at ECAP.IRO * 16) or an IOTLB QI descriptor for queued invalidation.
+
+  @param[in] VtdUnitInfo The VTd engine unit information.
+
+  @retval EFI_SUCCESS The invalidation completed.
+  @retval EFI_DEVICE_ERROR A previous invalidation is still pending, or the QI submission faulted.
+**/
+EFI_STATUS
+InvalidateIOTLB (
+  IN VTD_UNIT_INFO *VtdUnitInfo
+  )
+{
+  UINT64 Reg64;
+  VTD_ECAP_REG ECapReg;
+  VTD_CAP_REG CapReg;
+  QI_256_DESC QiDesc;
+
+  if (VtdUnitInfo->EnableQueuedInvalidation == 0) {
+    //
+    // Register-based Invalidation
+    //
+    ECapReg.Uint64 = MmioRead64 (VtdUnitInfo->VtdUnitBaseAddress + R_ECAP_REG);
+
+    Reg64 = MmioRead64 (VtdUnitInfo->VtdUnitBaseAddress + (ECapReg.Bits.IRO * 16) + R_IOTLB_REG);
+    if ((Reg64 & B_IOTLB_REG_IVT) != 0) {
+      // IVT still set: a previous IOTLB invalidation has not completed.
+      DEBUG ((DEBUG_ERROR, "ERROR: InvalidateIOTLB: B_IOTLB_REG_IVT is set for VTD(%x)\n", VtdUnitInfo->VtdUnitBaseAddress));
+      return EFI_DEVICE_ERROR;
+    }
+
+    // Request a global IOTLB invalidation and poll IVT until done.
+    Reg64 &= ((~B_IOTLB_REG_IVT) & (~B_IOTLB_REG_IIRG_MASK));
+    Reg64 |= (B_IOTLB_REG_IVT | V_IOTLB_REG_IIRG_GLOBAL);
+    MmioWrite64 (VtdUnitInfo->VtdUnitBaseAddress + (ECapReg.Bits.IRO * 16) + R_IOTLB_REG, Reg64);
+
+    do {
+      Reg64 = MmioRead64 (VtdUnitInfo->VtdUnitBaseAddress + (ECapReg.Bits.IRO * 16) + R_IOTLB_REG);
+    } while ((Reg64 & B_IOTLB_REG_IVT) != 0);
+  } else {
+    //
+    // Queued Invalidation; honor the engine's read/write drain capabilities.
+    //
+    CapReg.Uint64 = MmioRead64 (VtdUnitInfo->VtdUnitBaseAddress + R_CAP_REG);
+    QiDesc.Uint64[0] = QI_IOTLB_DID(0) | QI_IOTLB_DR(CAP_READ_DRAIN(CapReg.Uint64)) | QI_IOTLB_DW(CAP_WRITE_DRAIN(CapReg.Uint64)) | QI_IOTLB_GRAN(1) | QI_IOTLB_TYPE;
+    QiDesc.Uint64[1] = QI_IOTLB_ADDR(0) | QI_IOTLB_IH(0) | QI_IOTLB_AM(0);
+    QiDesc.Uint64[2] = 0;
+    QiDesc.Uint64[3] = 0;
+
+    return SubmitQueuedInvalidationDescriptor(VtdUnitInfo->VtdUnitBaseAddress, &QiDesc);
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Enable DMAR translation in pre-mem phase.
+
+  Programs RTADDR_REG with the caller-supplied value (either the Null Root
+  Entry Table address or the Abort DMA Mode marker), latches it with SRTP,
+  flushes the write buffer and sets TE.
+
+  @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+  @param[in] RtaddrRegValue The value of RTADDR_REG.
+
+  @retval EFI_SUCCESS DMAR translation is enabled.
+                      NOTE(review): this implementation always returns
+                      EFI_SUCCESS; VtdLibSetGlobalCommandRegisterBits is
+                      presumed to poll until the bit takes effect.
+**/
+EFI_STATUS
+EnableDmarPreMem (
+  IN UINTN VtdUnitBaseAddress,
+  IN UINT64 RtaddrRegValue
+  )
+{
+  UINT32 Reg32;
+
+  DEBUG ((DEBUG_INFO, ">>>>>>EnableDmarPreMem() for engine [%x] \n", VtdUnitBaseAddress));
+
+  DEBUG ((DEBUG_INFO, "RTADDR_REG : 0x%016lx \n", RtaddrRegValue));
+  MmioWrite64 (VtdUnitBaseAddress + R_RTADDR_REG, RtaddrRegValue);
+
+  // SRTP makes the hardware latch the new root table pointer.
+  DEBUG ((DEBUG_INFO, "EnableDmarPreMem: waiting for RTPS bit to be set... \n"));
+  VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_SRTP);
+
+  Reg32 = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  DEBUG ((DEBUG_INFO, "EnableDmarPreMem: R_GSTS_REG = 0x%x \n", Reg32));
+
+  //
+  // Write Buffer Flush
+  //
+  VtdLibFlushWriteBuffer (VtdUnitBaseAddress);
+
+  //
+  // Enable VTd
+  //
+  VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_TE);
+
+  DEBUG ((DEBUG_INFO, "VTD () enabled!<<<<<<\n"));
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Enable DMAR translation.
+
+  Programs RTADDR_REG with the given root entry table, invalidates the
+  context cache and IOTLB, then sets TE to turn translation on. If
+  translation was already active and Abort DMA Mode (ADM) is supported and
+  enabled by policy, ADM is used as a safe intermediate state while the new
+  root table is installed.
+
+  @param[in] VtdUnitInfo The VTd engine unit information.
+  @param[in] RootEntryTable The address of the VTd RootEntryTable.
+
+  @retval EFI_SUCCESS DMAR translation is enabled.
+  @retval EFI_DEVICE_ERROR DMAR translation is not enabled.
+**/
+EFI_STATUS
+EnableDmar (
+  IN VTD_UNIT_INFO *VtdUnitInfo,
+  IN UINTN RootEntryTable
+  )
+{
+  UINTN VtdUnitBaseAddress;
+  BOOLEAN TEWasEnabled;
+
+  VtdUnitBaseAddress = VtdUnitInfo->VtdUnitBaseAddress;
+
+  DEBUG ((DEBUG_INFO, ">>>>>>EnableDmar() for engine [%x] \n", VtdUnitBaseAddress));
+
+  //
+  // Check TE was enabled or not.
+  //
+  TEWasEnabled = ((MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG) & B_GSTS_REG_TE) == B_GSTS_REG_TE);
+
+  if (TEWasEnabled && (VtdUnitInfo->ECapReg.Bits.ADMS == 1) && PcdGetBool (PcdVTdSupportAbortDmaMode)) {
+    //
+    // For implementations reporting Enhanced SRTP Support (ESRTPS) field as
+    // Clear in the Capability register, software must not modify this field while
+    // DMA remapping is active (TES=1 in Global Status register).
+    //
+    if (VtdUnitInfo->CapReg.Bits.ESRTPS == 0) {
+      VtdLibClearGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_TE);
+    }
+
+    //
+    // Enable ADM
+    //
+    MmioWrite64 (VtdUnitBaseAddress + R_RTADDR_REG, (UINT64) (RootEntryTable | V_RTADDR_REG_TTM_ADM));
+
+    DEBUG ((DEBUG_INFO, "EnableDmar: waiting for RTPS bit to be set... \n"));
+    VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_SRTP);
+
+    DEBUG ((DEBUG_INFO, "Enable Abort DMA Mode...\n"));
+    VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_TE);
+
+  } else {
+    DEBUG ((DEBUG_INFO, "RootEntryTable 0x%x \n", RootEntryTable));
+    MmioWrite64 (VtdUnitBaseAddress + R_RTADDR_REG, (UINT64) RootEntryTable);
+
+    DEBUG ((DEBUG_INFO, "EnableDmar: waiting for RTPS bit to be set... \n"));
+    VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_SRTP);
+  }
+
+  //
+  // Write Buffer Flush before invalidation
+  //
+  VtdLibFlushWriteBuffer (VtdUnitBaseAddress);
+
+  //
+  // Invalidate the context cache
+  //
+  InvalidateContextCache (VtdUnitInfo);
+
+  //
+  // Invalidate the IOTLB cache
+  //
+  InvalidateIOTLB (VtdUnitInfo);
+
+  //
+  // In the ADM path the real root table is installed only now, after the
+  // caches have been invalidated while DMA was aborted.
+  //
+  if (TEWasEnabled && (VtdUnitInfo->ECapReg.Bits.ADMS == 1) && PcdGetBool (PcdVTdSupportAbortDmaMode)) {
+    if (VtdUnitInfo->CapReg.Bits.ESRTPS == 0) {
+      VtdLibClearGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_TE);
+    }
+
+    DEBUG ((DEBUG_INFO, "RootEntryTable 0x%x \n", RootEntryTable));
+    MmioWrite64 (VtdUnitBaseAddress + R_RTADDR_REG, (UINT64) RootEntryTable);
+
+    DEBUG ((DEBUG_INFO, "EnableDmar: waiting for RTPS bit to be set... \n"));
+    VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_SRTP);
+  }
+
+  //
+  // Enable VTd
+  //
+  VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_TE);
+
+  DEBUG ((DEBUG_INFO, "VTD () enabled!<<<<<<\n"));
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Enable VTd translation table protection for block DMA
+
+  Used pre-memory: either Abort DMA Mode (if supported and enabled by
+  policy) or the Null Root Entry Table PPI is used to block all DMA.
+
+  @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+
+  @retval EFI_SUCCESS DMAR translation is enabled.
+  @retval EFI_DEVICE_ERROR DMAR translation is not enabled.
+**/
+EFI_STATUS
+EnableVTdTranslationProtectionBlockDma (
+  IN UINTN VtdUnitBaseAddress
+  )
+{
+  EFI_STATUS Status;
+  VTD_ECAP_REG ECapReg;
+  EDKII_VTD_NULL_ROOT_ENTRY_TABLE_PPI *RootEntryTable;
+  UINT8 Mode;
+
+  DEBUG ((DEBUG_INFO, "EnableVTdTranslationProtectionBlockDma - 0x%08x\n", VtdUnitBaseAddress));
+
+  DEBUG ((DEBUG_INFO, "PcdVTdSupportAbortDmaMode : %d\n", PcdGetBool (PcdVTdSupportAbortDmaMode)));
+
+  ECapReg.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_ECAP_REG);
+  DEBUG ((DEBUG_INFO, "ECapReg.ADMS : %d\n", ECapReg.Bits.ADMS));
+
+  if ((ECapReg.Bits.ADMS == 1) && PcdGetBool (PcdVTdSupportAbortDmaMode)) {
+    Mode = VTD_LOG_PEI_PRE_MEM_ADM;
+    //
+    // Use Abort DMA Mode
+    //
+    DEBUG ((DEBUG_INFO, "Enable abort DMA mode.\n"));
+    Status = EnableDmarPreMem (VtdUnitBaseAddress, V_RTADDR_REG_TTM_ADM);
+  } else {
+    //
+    // Use Null Root Entry Table
+    //
+    Status = PeiServicesLocatePpi (
+               &gEdkiiVTdNullRootEntryTableGuid,
+               0,
+               NULL,
+               (VOID **)&RootEntryTable
+               );
+    if (EFI_ERROR (Status)) {
+      Mode = VTD_LOG_PEI_PRE_MEM_DISABLE;
+      DEBUG ((DEBUG_ERROR, "Locate Null Root Entry Table Ppi Failed : %r\n", Status));
+      ASSERT (FALSE);
+    } else {
+      Mode = VTD_LOG_PEI_PRE_MEM_TE;
+      DEBUG ((DEBUG_INFO, "Block All DMA by TE.\n"));
+      Status = EnableDmarPreMem (VtdUnitBaseAddress, (UINT64) (*RootEntryTable));
+    }
+  }
+
+  // Log the chosen mode; final argument is 1 on success, 0 on failure.
+  VTdLogAddPreMemoryEvent (VtdUnitBaseAddress, Mode, EFI_ERROR (Status) ? 0 : 1);
+
+  return Status;
+}
+
+/**
+  Enable VTd translation table protection.
+
+  Enables DMAR on every engine that is not already done, preferring the
+  extended root entry table when one was built.
+
+  @param[in] VTdInfo The VTd engine context information.
+
+  @retval EFI_SUCCESS DMAR translation is enabled on all engines.
+  @retval EFI_DEVICE_ERROR DMAR translation is not enabled.
+**/
+EFI_STATUS
+EnableVTdTranslationProtection (
+  IN VTD_INFO *VTdInfo
+  )
+{
+  EFI_STATUS Status;
+  UINTN Index;
+  VTD_UNIT_INFO *VtdUnitInfo;
+
+  for (Index = 0; Index < VTdInfo->VTdEngineCount; Index++) {
+    VtdUnitInfo = &VTdInfo->VtdUnitInfo[Index];
+    if (VtdUnitInfo->Done) {
+      DEBUG ((DEBUG_INFO, "EnableVtdDmar (%d) was enabled\n", Index));
+      continue;
+    }
+
+    if (VtdUnitInfo->ExtRootEntryTable != 0) {
+      // BIT11 in RTADDR selects the extended root table format (RTT) -- per VT-d spec; confirm.
+      DEBUG ((DEBUG_INFO, "EnableVtdDmar (%d) ExtRootEntryTable 0x%x\n", Index, VtdUnitInfo->ExtRootEntryTable));
+      Status = EnableDmar (VtdUnitInfo, VtdUnitInfo->ExtRootEntryTable | BIT11);
+    } else {
+      DEBUG ((DEBUG_INFO, "EnableVtdDmar (%d) RootEntryTable 0x%x\n", Index, VtdUnitInfo->RootEntryTable));
+      Status = EnableDmar (VtdUnitInfo, VtdUnitInfo->RootEntryTable);
+    }
+
+    VTdLogAddEvent (VTDLOG_PEI_POST_MEM_ENABLE_DMA_PROTECT, VTdInfo->VtdUnitInfo[Index].VtdUnitBaseAddress, Status);
+
+    if (EFI_ERROR (Status)) {
+      DEBUG ((DEBUG_ERROR, "EnableVtdDmar (%d) Failed !\n", Index));
+      return Status;
+    }
+    VtdUnitInfo->Done = TRUE;
+  }
+  return EFI_SUCCESS;
+}
+
+/**
+  Disable VTd translation table protection.
+
+  Disables DMAR on every engine and tears down the queued invalidation
+  interface (freeing its queue buffer) where it was enabled.
+
+  @param[in] VTdInfo The VTd engine context information. May be NULL (no-op).
+**/
+VOID
+DisableVTdTranslationProtection (
+  IN VTD_INFO *VTdInfo
+  )
+{
+  UINTN Index;
+  VTD_UNIT_INFO *VtdUnitInfo;
+
+  if (VTdInfo == NULL) {
+    return;
+  }
+
+  DEBUG ((DEBUG_INFO, "DisableVTdTranslationProtection - %d Vtd Engine\n", VTdInfo->VTdEngineCount));
+
+  for (Index = 0; Index < VTdInfo->VTdEngineCount; Index++) {
+    VtdUnitInfo = &VTdInfo->VtdUnitInfo[Index];
+
+    VtdLibDisableDmar (VtdUnitInfo->VtdUnitBaseAddress);
+    VTdLogAddEvent (VTDLOG_PEI_POST_MEM_DISABLE_DMA_PROTECT, VtdUnitInfo->VtdUnitBaseAddress, 0);
+
+    if (VtdUnitInfo->EnableQueuedInvalidation != 0) {
+      //
+      // Disable queued invalidation interface.
+      //
+      VtdLibDisableQueuedInvalidationInterface (VtdUnitInfo->VtdUnitBaseAddress);
+
+      // Release the invalidation queue buffer allocated by
+      // PerpareCacheInvalidationInterface.
+      if (VtdUnitInfo->QiDescBuffer != NULL) {
+        FreePages(VtdUnitInfo->QiDescBuffer, EFI_SIZE_TO_PAGES (VtdUnitInfo->QiDescBufferSize));
+        VtdUnitInfo->QiDescBuffer = NULL;
+        VtdUnitInfo->QiDescBufferSize = 0;
+      }
+
+      VtdUnitInfo->EnableQueuedInvalidation = 0;
+      VTdLogAddEvent (VTDLOG_PEI_QUEUED_INVALIDATION, VTD_LOG_QI_DISABLE, VtdUnitInfo->VtdUnitBaseAddress);
+    }
+  }
+
+  return;
+}
+
+/**
+  Check if VTd engine should use 5 level paging.
+
+  4-level paging is preferred whenever the host address width allows it
+  (<= 48 bits) and the engine supports it (SAGAW BIT2); otherwise 5-level
+  paging (SAGAW BIT3) is used when available.
+
+  @param[in] HostAddressWidth Host Address Width.
+  @param[in] CapReg The VTd engine capability register.
+  @param[out] Is5LevelPaging Use 5 level paging or not
+
+  @retval EFI_SUCCESS Success
+  @retval EFI_UNSUPPORTED Neither 4-level nor 5-level paging is supported.
+
+**/
+EFI_STATUS
+VtdCheckUsing5LevelPaging (
+  IN UINT8 HostAddressWidth,
+  IN VTD_CAP_REG CapReg,
+  OUT BOOLEAN *Is5LevelPaging
+  )
+{
+  DEBUG ((DEBUG_INFO, " CapReg SAGAW bits : 0x%02x\n", CapReg.Bits.SAGAW));
+
+  *Is5LevelPaging = FALSE;
+  if ((CapReg.Bits.SAGAW & BIT3) != 0) {
+    *Is5LevelPaging = TRUE;
+    if ((HostAddressWidth <= 48) &&
+        ((CapReg.Bits.SAGAW & BIT2) != 0)) {
+      //
+      // Prefer 4-level paging when it can cover the host address width.
+      // (Previously this carried an else-return that wrongly rejected
+      // engines supporting only 5-level paging, or 5-level with HAW > 48.)
+      //
+      *Is5LevelPaging = FALSE;
+    }
+  }
+  if ((CapReg.Bits.SAGAW & (BIT3 | BIT2)) == 0) {
+    return EFI_UNSUPPORTED;
+  }
+  DEBUG ((DEBUG_INFO, " Using %d Level Paging\n", *Is5LevelPaging ? 5 : 4));
+  return EFI_SUCCESS;
+}
+
+
+/**
+  Prepare VTD configuration.
+
+  For every engine not yet enabled: cache VER/CAP/ECAP, decide between
+  4-level and 5-level second-level paging, and set up the cache
+  invalidation interface.
+
+  @param[in] VTdInfo The VTd engine context information.
+
+  @retval EFI_SUCCESS Prepare Vtd config success
+**/
+EFI_STATUS
+PrepareVtdConfig (
+  IN VTD_INFO *VTdInfo
+  )
+{
+  EFI_STATUS Status;
+  UINTN EngineIndex;
+  VTD_UNIT_INFO *UnitInfo;
+  UINTN BaseAddress;
+
+  //
+  // Lazily allocate the register snapshot buffer used by DumpVtdIfError;
+  // DumpVtdIfError tolerates a NULL buffer.
+  //
+  if (VTdInfo->RegsInfoBuffer == NULL) {
+    VTdInfo->RegsInfoBuffer = AllocateZeroPages (EFI_SIZE_TO_PAGES (sizeof (VTD_REGESTER_THIN_INFO) + sizeof (VTD_UINT128) * VTD_CAP_REG_NFR_MAX));
+    ASSERT (VTdInfo->RegsInfoBuffer != NULL);
+  }
+
+  for (EngineIndex = 0; EngineIndex < VTdInfo->VTdEngineCount; EngineIndex++) {
+    UnitInfo = &VTdInfo->VtdUnitInfo[EngineIndex];
+    if (UnitInfo->Done) {
+      continue;
+    }
+
+    BaseAddress = UnitInfo->VtdUnitBaseAddress;
+    DEBUG ((DEBUG_INFO, "VTd Engine: 0x%08X\n", BaseAddress));
+
+    //
+    // Cache the engine's version and capability registers.
+    //
+    UnitInfo->VerReg.Uint32 = MmioRead32 (BaseAddress + R_VER_REG);
+    UnitInfo->CapReg.Uint64 = MmioRead64 (BaseAddress + R_CAP_REG);
+    UnitInfo->ECapReg.Uint64 = MmioRead64 (BaseAddress + R_ECAP_REG);
+    DEBUG ((DEBUG_INFO, " VER_REG : 0x%08X\n", UnitInfo->VerReg.Uint32));
+    DEBUG ((DEBUG_INFO, " CAP_REG : 0x%016lX\n", UnitInfo->CapReg.Uint64));
+    DEBUG ((DEBUG_INFO, " ECAP_REG : 0x%016lX\n", UnitInfo->ECapReg.Uint64));
+
+    Status = VtdCheckUsing5LevelPaging (VTdInfo->HostAddressWidth, UnitInfo->CapReg, &(UnitInfo->Is5LevelPaging));
+    if (EFI_ERROR (Status)) {
+      DEBUG ((DEBUG_ERROR, "!!!! Page-table type 0x%X is not supported!!!!\n", UnitInfo->CapReg.Bits.SAGAW));
+      return Status;
+    }
+
+    Status = PerpareCacheInvalidationInterface (UnitInfo);
+    if (EFI_ERROR (Status)) {
+      return Status;
+    }
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Dump VTd registers if there is error.
+
+  Scans every engine for pending faults (non-zero FSTS, FECTL overflow bit,
+  or any FRCD with its F bit set), logs a register snapshot for faulting
+  engines, then clears the recorded faults.
+**/
+VOID
+DumpVtdIfError (
+  VOID
+  )
+{
+  VTD_INFO *VTdInfo;
+  UINTN Num;
+  UINTN VtdUnitBaseAddress;
+  UINT16 Index;
+  VTD_REGESTER_THIN_INFO *VtdRegInfo;
+  VTD_FRCD_REG FrcdReg;
+  VTD_CAP_REG CapReg;
+  UINT32 FstsReg32;
+  UINT32 FectlReg32;
+  BOOLEAN HasError;
+
+  VTdInfo = GetVTdInfoHob ();
+  if (VTdInfo == NULL) {
+    return;
+  }
+
+  VtdRegInfo = VTdInfo->RegsInfoBuffer;
+  if (VtdRegInfo == NULL) {
+    return;
+  }
+
+  for (Num = 0; Num < VTdInfo->VTdEngineCount; Num++) {
+    HasError = FALSE;
+    VtdUnitBaseAddress = VTdInfo->VtdUnitInfo[Num].VtdUnitBaseAddress;
+    FstsReg32 = MmioRead32 (VtdUnitBaseAddress + R_FSTS_REG);
+    if (FstsReg32 != 0) {
+      HasError = TRUE;
+    }
+    FectlReg32 = MmioRead32 (VtdUnitBaseAddress + R_FECTL_REG);
+    if ((FectlReg32 & BIT30) != 0) {
+      HasError = TRUE;
+    }
+
+    CapReg.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_CAP_REG);
+    for (Index = 0; Index < (UINT16) CapReg.Bits.NFR + 1; Index++) {
+      FrcdReg.Uint64[0] = MmioRead64 (VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (Index * 16) + R_FRCD_REG));
+      FrcdReg.Uint64[1] = MmioRead64 (VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (Index * 16) + R_FRCD_REG + sizeof(UINT64)));
+      if (FrcdReg.Bits.F != 0) {
+        HasError = TRUE;
+        break;
+      }
+    }
+
+    if (HasError) {
+      DEBUG ((DEBUG_INFO, "\n#### ERROR ####\n"));
+
+      //
+      // Snapshot the fault-related registers for logging.
+      // (Fixed: stray ";;" after the RTADDR read.)
+      //
+      VtdRegInfo->BaseAddress = VtdUnitBaseAddress;
+      VtdRegInfo->GstsReg = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+      VtdRegInfo->RtaddrReg = MmioRead64 (VtdUnitBaseAddress + R_RTADDR_REG);
+      VtdRegInfo->FstsReg = FstsReg32;
+      VtdRegInfo->FectlReg = FectlReg32;
+      VtdRegInfo->IqercdReg = MmioRead64 (VtdUnitBaseAddress + R_IQERCD_REG);
+
+      //
+      // CapReg was already read above for this engine; no need to re-read.
+      //
+      for (Index = 0; Index < (UINT16) CapReg.Bits.NFR + 1; Index++) {
+        VtdRegInfo->FrcdReg[Index].Uint64Lo = MmioRead64 (VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (Index * 16) + R_FRCD_REG));
+        VtdRegInfo->FrcdReg[Index].Uint64Hi = MmioRead64 (VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (Index * 16) + R_FRCD_REG + sizeof(UINT64)));
+      }
+      VtdRegInfo->FrcdRegNum = Index;
+
+      DEBUG ((DEBUG_INFO, "\n#### ERROR ####\n"));
+
+      VtdLibDumpVtdRegsThin (NULL, NULL, VtdRegInfo);
+
+      DEBUG ((DEBUG_INFO, "#### ERROR ####\n\n"));
+
+      VTdLogAddDataEvent (VTDLOG_PEI_REGISTER, VTDLOG_REGISTER_THIN, VtdRegInfo, sizeof (VTD_REGESTER_THIN_INFO) + sizeof (VTD_UINT128) * (VtdRegInfo->FrcdRegNum - 1));
+
+      //
+      // Clear the recorded faults: F is write-one-to-clear, then write back
+      // FSTS to clear the summary bits.
+      //
+      for (Index = 0; Index < (UINT16) CapReg.Bits.NFR + 1; Index++) {
+        FrcdReg.Uint64[1] = MmioRead64 (VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (Index * 16) + R_FRCD_REG + sizeof(UINT64)));
+        if (FrcdReg.Bits.F != 0) {
+          //
+          // Software writes the value read from this field (F) to Clear it.
+          //
+          MmioWrite64 (VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (Index * 16) + R_FRCD_REG + sizeof(UINT64)), FrcdReg.Uint64[1]);
+        }
+      }
+      MmioWrite32 (VtdUnitBaseAddress + R_FSTS_REG, MmioRead32 (VtdUnitBaseAddress + R_FSTS_REG));
+    }
+  }
+}
\ No newline at end of file
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/TranslationTable.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/TranslationTable.c
new file mode 100644
index 000000000..03a4544a0
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/TranslationTable.c
@@ -0,0 +1,926 @@
+/** @file
+
+ Copyright (c) 2020 - 2021, Intel Corporation. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Uefi.h>
+#include <PiPei.h>
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/IoLib.h>
+#include <Library/DebugLib.h>
+#include <Library/PeiServicesLib.h>
+#include <Library/HobLib.h>
+#include <IndustryStandard/Vtd.h>
+#include <Ppi/IoMmu.h>
+#include <Ppi/VtdInfo.h>
+#include <Ppi/MemoryDiscovered.h>
+#include <Ppi/EndOfPeiPhase.h>
+#include <Guid/VtdPmrInfoHob.h>
+#include <Library/CacheMaintenanceLib.h>
+#include <Guid/VtdLogDataHob.h>
+#include "IntelVTdCorePei.h"
+
+#define ALIGN_VALUE_UP(Value, Alignment) (((Value) + (Alignment) - 1) & (~((Alignment) - 1)))
+#define ALIGN_VALUE_LOW(Value, Alignment) ((Value) & (~((Alignment) - 1)))
+
+/**
+  Allocate a number of 4 KB pages and zero-fill them.
+
+  @param[in]  Pages             The number of pages to allocate.
+
+  @return Pointer to the first allocated page.
+  @retval NULL                  No resource to allocate pages.
+**/
+VOID *
+EFIAPI
+AllocateZeroPages (
+  IN UINTN  Pages
+  )
+{
+  VOID  *Memory;
+
+  Memory = AllocatePages (Pages);
+  if (Memory != NULL) {
+    ZeroMem (Memory, EFI_PAGES_TO_SIZE (Pages));
+  }
+  return Memory;
+}
+
+/**
+  Set second level paging entry attribute based upon IoMmuAccess.
+
+  Only the Read and Write bits of the entry are updated; all other fields are
+  left untouched. The caller is responsible for flushing the modified entry
+  to memory (see the FlushPageTableMemory calls at the call sites).
+
+  @param[in]  PtEntry                   The paging entry.
+  @param[in]  IoMmuAccess               The IOMMU access (EDKII_IOMMU_ACCESS_READ / _WRITE bits).
+**/
+VOID
+SetSecondLevelPagingEntryAttribute (
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY *PtEntry,
+  IN UINT64 IoMmuAccess
+  )
+{
+  PtEntry->Bits.Read = ((IoMmuAccess & EDKII_IOMMU_ACCESS_READ) != 0);
+  PtEntry->Bits.Write = ((IoMmuAccess & EDKII_IOMMU_ACCESS_WRITE) != 0);
+  DEBUG ((DEBUG_VERBOSE, "SetSecondLevelPagingEntryAttribute - 0x%x - 0x%x\n", PtEntry, IoMmuAccess));
+}
+
+/**
+  Create second level paging entry table.
+
+  Builds (or extends) a 4-level or 5-level second level page table covering
+  [MemoryBase, MemoryLimit), mapped with 2 MB leaf pages carrying IoMmuAccess
+  permissions. If IoMmuAccess is 0, only the (empty) top level table is
+  created, i.e. every address is not-present.
+
+  @param[in]  VtdUnitInfo               The VTd engine unit information.
+  @param[in]  SecondLevelPagingEntry    The second level paging entry. If NULL, a new top level table is allocated.
+  @param[in]  MemoryBase                The base of the memory.
+  @param[in]  MemoryLimit               The limit of the memory.
+  @param[in]  IoMmuAccess               The IOMMU access.
+
+  @return The second level paging entry.
+  @retval NULL                          MemoryLimit is 0, or a page table allocation failed.
+**/
+VTD_SECOND_LEVEL_PAGING_ENTRY *
+CreateSecondLevelPagingEntryTable (
+  IN VTD_UNIT_INFO *VtdUnitInfo,
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY *SecondLevelPagingEntry,
+  IN UINT64 MemoryBase,
+  IN UINT64 MemoryLimit,
+  IN UINT64 IoMmuAccess
+  )
+{
+  UINTN Index5;
+  UINTN Index4;
+  UINTN Index3;
+  UINTN Index2;
+  UINTN Lvl5Start;
+  UINTN Lvl5End;
+  UINTN Lvl4PagesStart;
+  UINTN Lvl4PagesEnd;
+  UINTN Lvl4Start;
+  UINTN Lvl4End;
+  UINTN Lvl3Start;
+  UINTN Lvl3End;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl5PtEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl4PtEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl3PtEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl2PtEntry;
+  UINT64 BaseAddress;
+  UINT64 EndAddress;
+  BOOLEAN Is5LevelPaging;
+
+  if (MemoryLimit == 0) {
+    return NULL;
+  }
+
+  Lvl4PagesStart = 0;
+  Lvl4PagesEnd = 0;
+  Lvl4PtEntry = NULL;
+  Lvl5PtEntry = NULL;
+
+  //
+  // Align the covered range to 2 MB boundaries: leaf entries map 2 MB pages.
+  //
+  BaseAddress = ALIGN_VALUE_LOW (MemoryBase, SIZE_2MB);
+  EndAddress = ALIGN_VALUE_UP (MemoryLimit, SIZE_2MB);
+  DEBUG ((DEBUG_INFO, "CreateSecondLevelPagingEntryTable: BaseAddress - 0x%016lx, EndAddress - 0x%016lx\n", BaseAddress, EndAddress));
+
+  if (SecondLevelPagingEntry == NULL) {
+    SecondLevelPagingEntry = AllocateZeroPages (1);
+    if (SecondLevelPagingEntry == NULL) {
+      DEBUG ((DEBUG_ERROR, "Could not Alloc LVL4 or LVL5 PT. \n"));
+      return NULL;
+    }
+    FlushPageTableMemory (VtdUnitInfo, (UINTN) SecondLevelPagingEntry, EFI_PAGES_TO_SIZE (1));
+  }
+
+  DEBUG ((DEBUG_INFO, "  SecondLevelPagingEntry:0x%016lx\n", (UINT64) (UINTN) SecondLevelPagingEntry));
+  //
+  // If no access is needed, just create not present entry.
+  //
+  if (IoMmuAccess == 0) {
+    DEBUG ((DEBUG_INFO, "  SecondLevelPagingEntry:0x%016lx Access 0\n", (UINT64) (UINTN) SecondLevelPagingEntry));
+    return SecondLevelPagingEntry;
+  }
+
+  Is5LevelPaging = VtdUnitInfo->Is5LevelPaging;
+
+  if (Is5LevelPaging) {
+    Lvl5Start = RShiftU64 (BaseAddress, 48) & 0x1FF;
+    Lvl5End = RShiftU64 (EndAddress - 1, 48) & 0x1FF;
+    DEBUG ((DEBUG_INFO, "  Lvl5Start - 0x%x, Lvl5End - 0x%x\n", Lvl5Start, Lvl5End));
+
+    Lvl4Start = RShiftU64 (BaseAddress, 39) & 0x1FF;
+    Lvl4End = RShiftU64 (EndAddress - 1, 39) & 0x1FF;
+
+    //
+    // Track the LVL4 range as a flat (Lvl5 << 9 | Lvl4) page index so the walk
+    // below can recompute per-LVL5 start/end indices.
+    //
+    Lvl4PagesStart = (Lvl5Start<<9) | Lvl4Start;
+    Lvl4PagesEnd = (Lvl5End<<9) | Lvl4End;
+    DEBUG ((DEBUG_INFO, "  Lvl4PagesStart - 0x%x, Lvl4PagesEnd - 0x%x\n", Lvl4PagesStart, Lvl4PagesEnd));
+
+    Lvl5PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *) SecondLevelPagingEntry;
+  } else {
+    //
+    // 4-level paging: run the outer loop exactly once.
+    //
+    Lvl5Start = RShiftU64 (BaseAddress, 48) & 0x1FF;
+    Lvl5End = Lvl5Start;
+
+    Lvl4Start = RShiftU64 (BaseAddress, 39) & 0x1FF;
+    Lvl4End = RShiftU64 (EndAddress - 1, 39) & 0x1FF;
+    DEBUG ((DEBUG_INFO, "  Lvl4Start - 0x%x, Lvl4End - 0x%x\n", Lvl4Start, Lvl4End));
+
+    Lvl4PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *) SecondLevelPagingEntry;
+  }
+
+  for (Index5 = Lvl5Start; Index5 <= Lvl5End; Index5++) {
+    if (Is5LevelPaging) {
+      if (Lvl5PtEntry[Index5].Uint64 == 0) {
+        Lvl5PtEntry[Index5].Uint64 = (UINT64) (UINTN) AllocateZeroPages (1);
+        if (Lvl5PtEntry[Index5].Uint64 == 0) {
+          DEBUG ((DEBUG_ERROR, "!!!!!! ALLOCATE LVL4 PAGE FAIL (0x%x)!!!!!!\n", Index5));
+          ASSERT (FALSE);
+          return NULL;
+        }
+        FlushPageTableMemory (VtdUnitInfo, (UINTN) Lvl5PtEntry[Index5].Uint64, SIZE_4KB);
+        SetSecondLevelPagingEntryAttribute (&Lvl5PtEntry[Index5], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+      }
+      Lvl4Start = Lvl4PagesStart & 0x1FF;
+      if (((Index5+1)<<9) > Lvl4PagesEnd) {
+        Lvl4End = SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY) - 1;
+        Lvl4PagesStart = (Index5+1)<<9;
+      } else {
+        Lvl4End = Lvl4PagesEnd & 0x1FF;
+      }
+      DEBUG ((DEBUG_INFO, "  Lvl5(0x%x): Lvl4Start - 0x%x, Lvl4End - 0x%x\n", Index5, Lvl4Start, Lvl4End));
+      Lvl4PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *) (UINTN) VTD_64BITS_ADDRESS(Lvl5PtEntry[Index5].Bits.AddressLo, Lvl5PtEntry[Index5].Bits.AddressHi);
+    }
+
+    for (Index4 = Lvl4Start; Index4 <= Lvl4End; Index4++) {
+      if (Lvl4PtEntry[Index4].Uint64 == 0) {
+        Lvl4PtEntry[Index4].Uint64 = (UINT64) (UINTN) AllocateZeroPages (1);
+        if (Lvl4PtEntry[Index4].Uint64 == 0) {
+          DEBUG ((DEBUG_ERROR, "!!!!!! ALLOCATE LVL4 PAGE FAIL (0x%x)!!!!!!\n", Index4));
+          ASSERT(FALSE);
+          return NULL;
+        }
+        FlushPageTableMemory (VtdUnitInfo, (UINTN) Lvl4PtEntry[Index4].Uint64, SIZE_4KB);
+        SetSecondLevelPagingEntryAttribute (&Lvl4PtEntry[Index4], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+      }
+
+      Lvl3Start = RShiftU64 (BaseAddress, 30) & 0x1FF;
+      if (ALIGN_VALUE_LOW(BaseAddress + SIZE_1GB, SIZE_1GB) <= EndAddress) {
+        Lvl3End = SIZE_4KB / sizeof (VTD_SECOND_LEVEL_PAGING_ENTRY) - 1;
+      } else {
+        Lvl3End = RShiftU64 (EndAddress - 1, 30) & 0x1FF;
+      }
+      DEBUG ((DEBUG_INFO, "  Lvl4(0x%x): Lvl3Start - 0x%x, Lvl3End - 0x%x\n", Index4, Lvl3Start, Lvl3End));
+
+      Lvl3PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *) (UINTN) VTD_64BITS_ADDRESS(Lvl4PtEntry[Index4].Bits.AddressLo, Lvl4PtEntry[Index4].Bits.AddressHi);
+      for (Index3 = Lvl3Start; Index3 <= Lvl3End; Index3++) {
+        if (Lvl3PtEntry[Index3].Uint64 == 0) {
+          Lvl3PtEntry[Index3].Uint64 = (UINT64) (UINTN) AllocateZeroPages (1);
+          if (Lvl3PtEntry[Index3].Uint64 == 0) {
+            DEBUG ((DEBUG_ERROR, "!!!!!! ALLOCATE LVL3 PAGE FAIL (0x%x, 0x%x)!!!!!!\n", Index4, Index3));
+            ASSERT(FALSE);
+            return NULL;
+          }
+          FlushPageTableMemory (VtdUnitInfo, (UINTN) Lvl3PtEntry[Index3].Uint64, SIZE_4KB);
+          SetSecondLevelPagingEntryAttribute (&Lvl3PtEntry[Index3], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+        }
+
+        //
+        // Fill the LVL2 table with 2 MB leaf pages carrying IoMmuAccess.
+        //
+        Lvl2PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *) (UINTN) VTD_64BITS_ADDRESS(Lvl3PtEntry[Index3].Bits.AddressLo, Lvl3PtEntry[Index3].Bits.AddressHi);
+        for (Index2 = 0; Index2 < SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY); Index2++) {
+          Lvl2PtEntry[Index2].Uint64 = BaseAddress;
+          SetSecondLevelPagingEntryAttribute (&Lvl2PtEntry[Index2], IoMmuAccess);
+          Lvl2PtEntry[Index2].Bits.PageSize = 1;
+          BaseAddress += SIZE_2MB;
+          if (BaseAddress >= MemoryLimit) {
+            break;
+          }
+        }
+        FlushPageTableMemory (VtdUnitInfo, (UINTN) Lvl2PtEntry, SIZE_4KB);
+        if (BaseAddress >= MemoryLimit) {
+          break;
+        }
+      }
+      FlushPageTableMemory (VtdUnitInfo, (UINTN) &Lvl3PtEntry[Lvl3Start], (UINTN) &Lvl3PtEntry[Lvl3End + 1] - (UINTN) &Lvl3PtEntry[Lvl3Start]);
+      if (BaseAddress >= MemoryLimit) {
+        break;
+      }
+    }
+    FlushPageTableMemory (VtdUnitInfo, (UINTN) &Lvl4PtEntry[Lvl4Start], (UINTN) &Lvl4PtEntry[Lvl4End + 1] - (UINTN) &Lvl4PtEntry[Lvl4Start]);
+  }
+  FlushPageTableMemory (VtdUnitInfo, (UINTN) &Lvl5PtEntry[Lvl5Start], (UINTN) &Lvl5PtEntry[Lvl5End + 1] - (UINTN) &Lvl5PtEntry[Lvl5Start]);
+
+  DEBUG ((DEBUG_INFO, "  SecondLevelPagingEntry:0x%016lx\n", (UINT64) (UINTN) SecondLevelPagingEntry));
+  return SecondLevelPagingEntry;
+}
+
+/**
+  Create context entry.
+
+  Allocates one root entry table plus one context entry table per root entry,
+  and points every context entry at the unit's fixed second level page table.
+
+  @param[in]  VtdUnitInfo       The VTd engine unit information.
+
+  @retval EFI_SUCCESS           The context entry is created (or already exists).
+  @retval EFI_OUT_OF_RESOURCES  Not enough resource to create context entry.
+
+**/
+EFI_STATUS
+CreateContextEntry (
+  IN VTD_UNIT_INFO *VtdUnitInfo
+  )
+{
+  UINTN RootPages;
+  UINTN ContextPages;
+  UINTN EntryTablePages;
+  VOID *Buffer;
+  UINTN RootIndex;
+  UINTN ContextIndex;
+  VTD_ROOT_ENTRY *RootEntryBase;
+  VTD_ROOT_ENTRY *RootEntry;
+  VTD_CONTEXT_ENTRY *ContextEntryTable;
+  VTD_CONTEXT_ENTRY *ContextEntry;
+  VTD_SOURCE_ID SourceId;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *SecondLevelPagingEntry;
+  UINT64 Pt;
+
+  if (VtdUnitInfo->RootEntryTable != 0) {
+    return EFI_SUCCESS;
+  }
+
+  //
+  // One allocation holds the root table followed by all context tables.
+  //
+  RootPages = EFI_SIZE_TO_PAGES (sizeof (VTD_ROOT_ENTRY) * VTD_ROOT_ENTRY_NUMBER);
+  ContextPages = EFI_SIZE_TO_PAGES (sizeof (VTD_CONTEXT_ENTRY) * VTD_CONTEXT_ENTRY_NUMBER);
+  EntryTablePages = RootPages + ContextPages * (VTD_ROOT_ENTRY_NUMBER);
+  Buffer = AllocateZeroPages (EntryTablePages);
+  if (Buffer == NULL) {
+    DEBUG ((DEBUG_ERROR, "Could not Alloc Root Entry Table.. \n"));
+    return EFI_OUT_OF_RESOURCES;
+  }
+
+  DEBUG ((DEBUG_INFO, "RootEntryTable address - 0x%x\n", Buffer));
+  VtdUnitInfo->RootEntryTable = (UINTN) Buffer;
+  VtdUnitInfo->RootEntryTablePageSize = EntryTablePages;
+  RootEntryBase = (VTD_ROOT_ENTRY *) Buffer;
+  Buffer = (UINT8 *) Buffer + EFI_PAGES_TO_SIZE (RootPages);
+
+  if (VtdUnitInfo->FixedSecondLevelPagingEntry == 0) {
+    DEBUG ((DEBUG_ERROR, "FixedSecondLevelPagingEntry is empty\n"));
+    ASSERT(FALSE);
+  }
+
+  for (RootIndex = 0; RootIndex < VTD_ROOT_ENTRY_NUMBER; RootIndex++) {
+    SourceId.Index.RootIndex = (UINT8) RootIndex;
+
+    RootEntry = &RootEntryBase[SourceId.Index.RootIndex];
+    RootEntry->Bits.ContextTablePointerLo  = (UINT32) RShiftU64 ((UINT64) (UINTN) Buffer, 12);
+    RootEntry->Bits.ContextTablePointerHi  = (UINT32) RShiftU64 ((UINT64) (UINTN) Buffer, 32);
+    RootEntry->Bits.Present = 1;
+    Buffer = (UINT8 *)Buffer + EFI_PAGES_TO_SIZE (ContextPages);
+    ContextEntryTable = (VTD_CONTEXT_ENTRY *) (UINTN) VTD_64BITS_ADDRESS(RootEntry->Bits.ContextTablePointerLo, RootEntry->Bits.ContextTablePointerHi);
+
+    for (ContextIndex = 0; ContextIndex < VTD_CONTEXT_ENTRY_NUMBER; ContextIndex++) {
+      SourceId.Index.ContextIndex = (UINT8) ContextIndex;
+      ContextEntry = &ContextEntryTable[SourceId.Index.ContextIndex];
+
+      ContextEntry->Bits.TranslationType = 0;
+      ContextEntry->Bits.FaultProcessingDisable = 0;
+      ContextEntry->Bits.Present = 0;
+
+      //
+      // Address width: 0x3 = 5-level (57-bit AGAW), 0x2 = 4-level (48-bit AGAW).
+      //
+      ContextEntry->Bits.AddressWidth = VtdUnitInfo->Is5LevelPaging ? 0x3 : 0x2;
+
+      if (VtdUnitInfo->FixedSecondLevelPagingEntry != 0) {
+        SecondLevelPagingEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *) VtdUnitInfo->FixedSecondLevelPagingEntry;
+        Pt = (UINT64)RShiftU64 ((UINT64) (UINTN) SecondLevelPagingEntry, 12);
+        ContextEntry->Bits.SecondLevelPageTranslationPointerLo = (UINT32) Pt;
+        ContextEntry->Bits.SecondLevelPageTranslationPointerHi = (UINT32) RShiftU64(Pt, 20);
+        ContextEntry->Bits.DomainIdentifier = ((1 << (UINT8)((UINTN)VtdUnitInfo->CapReg.Bits.ND * 2 + 4)) - 1);
+        ContextEntry->Bits.Present = 1;
+      }
+    }
+  }
+
+  FlushPageTableMemory (VtdUnitInfo, VtdUnitInfo->RootEntryTable, EFI_PAGES_TO_SIZE(EntryTablePages));
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Create extended context entry.
+
+  Same layout as CreateContextEntry, but using the extended root/context
+  entry formats (128-byte context entries split into lower/upper halves).
+
+  @param[in]  VtdUnitInfo       The VTd engine unit information.
+
+  @retval EFI_SUCCESS           The extended context entry is created (or already exists).
+  @retval EFI_OUT_OF_RESOURCES  Not enough resource to create extended context entry.
+**/
+EFI_STATUS
+CreateExtContextEntry (
+  IN VTD_UNIT_INFO *VtdUnitInfo
+  )
+{
+  UINTN RootPages;
+  UINTN ContextPages;
+  UINTN EntryTablePages;
+  VOID *Buffer;
+  UINTN RootIndex;
+  UINTN ContextIndex;
+  VTD_EXT_ROOT_ENTRY *ExtRootEntryBase;
+  VTD_EXT_ROOT_ENTRY *ExtRootEntry;
+  VTD_EXT_CONTEXT_ENTRY *ExtContextEntryTable;
+  VTD_EXT_CONTEXT_ENTRY *ExtContextEntry;
+  VTD_SOURCE_ID SourceId;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *SecondLevelPagingEntry;
+  UINT64 Pt;
+
+  if (VtdUnitInfo->ExtRootEntryTable != 0) {
+    return EFI_SUCCESS;
+  }
+
+  //
+  // One allocation holds the extended root table followed by all extended
+  // context tables.
+  //
+  RootPages = EFI_SIZE_TO_PAGES (sizeof (VTD_EXT_ROOT_ENTRY) * VTD_ROOT_ENTRY_NUMBER);
+  ContextPages = EFI_SIZE_TO_PAGES (sizeof (VTD_EXT_CONTEXT_ENTRY) * VTD_CONTEXT_ENTRY_NUMBER);
+  EntryTablePages = RootPages + ContextPages * (VTD_ROOT_ENTRY_NUMBER);
+  Buffer = AllocateZeroPages (EntryTablePages);
+  if (Buffer == NULL) {
+    DEBUG ((DEBUG_ERROR, "Could not Alloc Root Entry Table !\n"));
+    return EFI_OUT_OF_RESOURCES;
+  }
+
+  DEBUG ((DEBUG_INFO, "ExtRootEntryTable address - 0x%x\n", Buffer));
+  VtdUnitInfo->ExtRootEntryTable = (UINTN) Buffer;
+  VtdUnitInfo->ExtRootEntryTablePageSize = EntryTablePages;
+  ExtRootEntryBase = (VTD_EXT_ROOT_ENTRY *) Buffer;
+  Buffer = (UINT8 *) Buffer + EFI_PAGES_TO_SIZE (RootPages);
+
+  if (VtdUnitInfo->FixedSecondLevelPagingEntry == 0) {
+    DEBUG ((DEBUG_ERROR, "FixedSecondLevelPagingEntry is empty\n"));
+    ASSERT(FALSE);
+  }
+
+  for (RootIndex = 0; RootIndex < VTD_ROOT_ENTRY_NUMBER; RootIndex++) {
+    SourceId.Index.RootIndex = (UINT8)RootIndex;
+
+    //
+    // The upper context table occupies the 4 KB page right after the lower one.
+    //
+    ExtRootEntry = &ExtRootEntryBase[SourceId.Index.RootIndex];
+    ExtRootEntry->Bits.LowerContextTablePointerLo  = (UINT32) RShiftU64 ((UINT64) (UINTN) Buffer, 12);
+    ExtRootEntry->Bits.LowerContextTablePointerHi  = (UINT32) RShiftU64 ((UINT64) (UINTN) Buffer, 32);
+    ExtRootEntry->Bits.LowerPresent = 1;
+    ExtRootEntry->Bits.UpperContextTablePointerLo  = (UINT32) RShiftU64 ((UINT64) (UINTN) Buffer, 12) + 1;
+    ExtRootEntry->Bits.UpperContextTablePointerHi  = (UINT32) RShiftU64 (RShiftU64 ((UINT64) (UINTN) Buffer, 12) + 1, 20);
+    ExtRootEntry->Bits.UpperPresent = 1;
+    Buffer = (UINT8 *) Buffer + EFI_PAGES_TO_SIZE (ContextPages);
+    ExtContextEntryTable = (VTD_EXT_CONTEXT_ENTRY *) (UINTN) VTD_64BITS_ADDRESS (ExtRootEntry->Bits.LowerContextTablePointerLo, ExtRootEntry->Bits.LowerContextTablePointerHi);
+
+    for (ContextIndex = 0; ContextIndex < VTD_CONTEXT_ENTRY_NUMBER; ContextIndex++) {
+      SourceId.Index.ContextIndex = (UINT8) ContextIndex;
+      ExtContextEntry = &ExtContextEntryTable[SourceId.Index.ContextIndex];
+
+      ExtContextEntry->Bits.TranslationType = 0;
+      ExtContextEntry->Bits.FaultProcessingDisable = 0;
+      ExtContextEntry->Bits.Present = 0;
+
+      //
+      // Address width: 0x3 = 5-level (57-bit AGAW), 0x2 = 4-level (48-bit AGAW).
+      //
+      ExtContextEntry->Bits.AddressWidth = VtdUnitInfo->Is5LevelPaging ? 0x3 : 0x2;
+
+      if (VtdUnitInfo->FixedSecondLevelPagingEntry != 0) {
+        SecondLevelPagingEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *) VtdUnitInfo->FixedSecondLevelPagingEntry;
+        Pt = (UINT64)RShiftU64 ((UINT64) (UINTN) SecondLevelPagingEntry, 12);
+
+        ExtContextEntry->Bits.SecondLevelPageTranslationPointerLo = (UINT32) Pt;
+        ExtContextEntry->Bits.SecondLevelPageTranslationPointerHi = (UINT32) RShiftU64(Pt, 20);
+        ExtContextEntry->Bits.DomainIdentifier = ((1 << (UINT8) ((UINTN) VtdUnitInfo->CapReg.Bits.ND * 2 + 4)) - 1);
+        ExtContextEntry->Bits.Present = 1;
+      }
+    }
+  }
+
+  FlushPageTableMemory (VtdUnitInfo, VtdUnitInfo->ExtRootEntryTable, EFI_PAGES_TO_SIZE(EntryTablePages));
+
+  return EFI_SUCCESS;
+}
+
+//
+// Bit fields of a second level paging entry (see VTD_SECOND_LEVEL_PAGING_ENTRY).
+//
+#define VTD_PG_R BIT0
+#define VTD_PG_W BIT1
+#define VTD_PG_X BIT2
+#define VTD_PG_EMT (BIT3 | BIT4 | BIT5)
+#define VTD_PG_TM (BIT62)
+
+// Page-size bit: set in a non-leaf-level entry that maps a large (2M/1G) page.
+#define VTD_PG_PS BIT7
+
+// Bits copied from a large page entry into the smaller entries when the page
+// is split (see SplitSecondLevelPage). NOTE(review): name spelling keeps the
+// original "PROGATE" (propagate).
+#define PAGE_PROGATE_BITS (VTD_PG_TM | VTD_PG_EMT | VTD_PG_W | VTD_PG_R)
+
+#define PAGING_4K_MASK 0xFFF
+#define PAGING_2M_MASK 0x1FFFFF
+#define PAGING_1G_MASK 0x3FFFFFFF
+
+// Each paging level indexes 512 entries (9 bits).
+#define PAGING_VTD_INDEX_MASK 0x1FF
+
+#define PAGING_4K_ADDRESS_MASK_64 0x000FFFFFFFFFF000ull
+#define PAGING_2M_ADDRESS_MASK_64 0x000FFFFFFFE00000ull
+#define PAGING_1G_ADDRESS_MASK_64 0x000FFFFFC0000000ull
+
+// Page mapping granularity of a page table entry.
+typedef enum {
+  PageNone,
+  Page4K,
+  Page2M,
+  Page1G,
+} PAGE_ATTRIBUTE;
+
+// Maps a PAGE_ATTRIBUTE to its page length and address mask.
+typedef struct {
+  PAGE_ATTRIBUTE Attribute;
+  UINT64 Length;
+  UINT64 AddressMask;
+} PAGE_ATTRIBUTE_TABLE;
+
+PAGE_ATTRIBUTE_TABLE mPageAttributeTable[] = {
+  {Page4K, SIZE_4KB, PAGING_4K_ADDRESS_MASK_64},
+  {Page2M, SIZE_2MB, PAGING_2M_ADDRESS_MASK_64},
+  {Page1G, SIZE_1GB, PAGING_1G_ADDRESS_MASK_64},
+};
+
+/**
+  Return length according to page attributes.
+
+  @param[in]  PageAttribute     The page attribute of the page entry.
+
+  @return The length of page entry, or 0 if the attribute is unknown.
+**/
+UINTN
+PageAttributeToLength (
+  IN PAGE_ATTRIBUTE  PageAttribute
+  )
+{
+  UINTN  Entry;
+  UINTN  Count;
+
+  Count = sizeof (mPageAttributeTable) / sizeof (mPageAttributeTable[0]);
+  for (Entry = 0; Entry < Count; Entry++) {
+    if (mPageAttributeTable[Entry].Attribute == PageAttribute) {
+      return (UINTN) mPageAttributeTable[Entry].Length;
+    }
+  }
+  //
+  // Not found in the table (e.g. PageNone).
+  //
+  return 0;
+}
+
+/**
+  Return page table entry to match the address.
+
+  Walks the second level page table for Address, allocating missing
+  intermediate tables on the way, and returns the entry that maps the
+  address together with its granularity (1G/2M/4K).
+
+  @param[in]  VtdUnitInfo              The VTd engine unit information.
+  @param[in]  SecondLevelPagingEntry   The second level paging entry in VTd table for the device.
+  @param[in]  Address                  The address to be checked.
+  @param[out] PageAttribute            The page attribute of the page entry.
+
+  @return The page entry.
+  @retval NULL                         Allocation failed, or the 4K entry is not present.
+**/
+VOID *
+GetSecondLevelPageTableEntry (
+  IN VTD_UNIT_INFO *VtdUnitInfo,
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY *SecondLevelPagingEntry,
+  IN PHYSICAL_ADDRESS Address,
+  OUT PAGE_ATTRIBUTE *PageAttribute
+  )
+{
+  UINTN Index1;
+  UINTN Index2;
+  UINTN Index3;
+  UINTN Index4;
+  UINTN Index5;
+  UINT64 *L1PageTable;
+  UINT64 *L2PageTable;
+  UINT64 *L3PageTable;
+  UINT64 *L4PageTable;
+  UINT64 *L5PageTable;
+  BOOLEAN Is5LevelPaging;
+
+  //
+  // Use 64-bit shifts for every level so addresses above 4 GB are decoded
+  // correctly on 32-bit (PEI) builds as well.
+  //
+  Index5 = ((UINTN) RShiftU64 (Address, 48)) & PAGING_VTD_INDEX_MASK;
+  Index4 = ((UINTN) RShiftU64 (Address, 39)) & PAGING_VTD_INDEX_MASK;
+  Index3 = ((UINTN) RShiftU64 (Address, 30)) & PAGING_VTD_INDEX_MASK;
+  Index2 = ((UINTN) RShiftU64 (Address, 21)) & PAGING_VTD_INDEX_MASK;
+  Index1 = ((UINTN) RShiftU64 (Address, 12)) & PAGING_VTD_INDEX_MASK;
+
+  Is5LevelPaging = VtdUnitInfo->Is5LevelPaging;
+
+  if (Is5LevelPaging) {
+    L5PageTable = (UINT64 *) SecondLevelPagingEntry;
+    if (L5PageTable[Index5] == 0) {
+      L5PageTable[Index5] = (UINT64) (UINTN) AllocateZeroPages (1);
+      if (L5PageTable[Index5] == 0) {
+        DEBUG ((DEBUG_ERROR, "!!!!!! ALLOCATE LVL5 PAGE FAIL (0x%x)!!!!!!\n", Index5));
+        ASSERT(FALSE);
+        *PageAttribute = PageNone;
+        return NULL;
+      }
+      FlushPageTableMemory (VtdUnitInfo, (UINTN) L5PageTable[Index5], SIZE_4KB);
+      SetSecondLevelPagingEntryAttribute ((VTD_SECOND_LEVEL_PAGING_ENTRY *) &L5PageTable[Index5], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+      FlushPageTableMemory (VtdUnitInfo, (UINTN) &L5PageTable[Index5], sizeof(L5PageTable[Index5]));
+    }
+    L4PageTable = (UINT64 *) (UINTN) (L5PageTable[Index5] & PAGING_4K_ADDRESS_MASK_64);
+  } else {
+    L4PageTable = (UINT64 *)SecondLevelPagingEntry;
+  }
+
+  if (L4PageTable[Index4] == 0) {
+    L4PageTable[Index4] = (UINT64) (UINTN) AllocateZeroPages (1);
+    if (L4PageTable[Index4] == 0) {
+      DEBUG ((DEBUG_ERROR, "!!!!!! ALLOCATE LVL4 PAGE FAIL (0x%x)!!!!!!\n", Index4));
+      ASSERT(FALSE);
+      *PageAttribute = PageNone;
+      return NULL;
+    }
+    FlushPageTableMemory (VtdUnitInfo, (UINTN) L4PageTable[Index4], SIZE_4KB);
+    SetSecondLevelPagingEntryAttribute ((VTD_SECOND_LEVEL_PAGING_ENTRY *) &L4PageTable[Index4], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+    FlushPageTableMemory (VtdUnitInfo, (UINTN) &L4PageTable[Index4], sizeof(L4PageTable[Index4]));
+  }
+
+  L3PageTable = (UINT64 *) (UINTN) (L4PageTable[Index4] & PAGING_4K_ADDRESS_MASK_64);
+  if (L3PageTable[Index3] == 0) {
+    L3PageTable[Index3] = (UINT64) (UINTN) AllocateZeroPages (1);
+    if (L3PageTable[Index3] == 0) {
+      DEBUG ((DEBUG_ERROR, "!!!!!! ALLOCATE LVL3 PAGE FAIL (0x%x, 0x%x)!!!!!!\n", Index4, Index3));
+      ASSERT(FALSE);
+      *PageAttribute = PageNone;
+      return NULL;
+    }
+    FlushPageTableMemory (VtdUnitInfo, (UINTN) L3PageTable[Index3], SIZE_4KB);
+    SetSecondLevelPagingEntryAttribute ((VTD_SECOND_LEVEL_PAGING_ENTRY *) &L3PageTable[Index3], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+    FlushPageTableMemory (VtdUnitInfo, (UINTN) &L3PageTable[Index3], sizeof (L3PageTable[Index3]));
+  }
+  if ((L3PageTable[Index3] & VTD_PG_PS) != 0) {
+    // 1G
+    *PageAttribute = Page1G;
+    return &L3PageTable[Index3];
+  }
+
+  L2PageTable = (UINT64 *) (UINTN) (L3PageTable[Index3] & PAGING_4K_ADDRESS_MASK_64);
+  if (L2PageTable[Index2] == 0) {
+    //
+    // Create a not-present 2 MB entry (access bits 0) for the address.
+    //
+    L2PageTable[Index2] = Address & PAGING_2M_ADDRESS_MASK_64;
+    SetSecondLevelPagingEntryAttribute ((VTD_SECOND_LEVEL_PAGING_ENTRY *) &L2PageTable[Index2], 0);
+    L2PageTable[Index2] |= VTD_PG_PS;
+    FlushPageTableMemory (VtdUnitInfo, (UINTN) &L2PageTable[Index2], sizeof (L2PageTable[Index2]));
+  }
+  if ((L2PageTable[Index2] & VTD_PG_PS) != 0) {
+    // 2M
+    *PageAttribute = Page2M;
+    return &L2PageTable[Index2];
+  }
+
+  // 4k
+  L1PageTable = (UINT64 *) (UINTN) (L2PageTable[Index2] & PAGING_4K_ADDRESS_MASK_64);
+  if ((L1PageTable[Index1] == 0) && (Address != 0)) {
+    *PageAttribute = PageNone;
+    return NULL;
+  }
+  *PageAttribute = Page4K;
+  return &L1PageTable[Index1];
+}
+
+/**
+  Modify memory attributes of page entry.
+
+  @param[in]  VtdUnitInfo       The VTd engine unit information.
+  @param[in]  PageEntry         The page entry.
+  @param[in]  IoMmuAccess       The IOMMU access.
+  @param[out] IsModified        TRUE means page table modified. FALSE means page table not modified.
+**/
+VOID
+ConvertSecondLevelPageEntryAttribute (
+  IN VTD_UNIT_INFO *VtdUnitInfo,
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY *PageEntry,
+  IN UINT64 IoMmuAccess,
+  OUT BOOLEAN *IsModified
+  )
+{
+  UINT64 OldValue;
+  UINT64 NewValue;
+
+  OldValue = PageEntry->Uint64;
+  SetSecondLevelPagingEntryAttribute (PageEntry, IoMmuAccess);
+  FlushPageTableMemory (VtdUnitInfo, (UINTN) PageEntry, sizeof(*PageEntry));
+  NewValue = PageEntry->Uint64;
+
+  //
+  // Report whether the access bits actually changed.
+  //
+  *IsModified = (BOOLEAN) (OldValue != NewValue);
+  if (*IsModified) {
+    DEBUG ((DEBUG_VERBOSE, "ConvertSecondLevelPageEntryAttribute 0x%lx", OldValue));
+    DEBUG ((DEBUG_VERBOSE, "->0x%lx\n", NewValue));
+  }
+}
+
+/**
+  This function returns if there is need to split page entry.
+
+  @param[in]  BaseAddress       The base address to be checked.
+  @param[in]  Length            The length to be checked.
+  @param[in]  PageAttribute     The page attribute of the page entry.
+
+  @retval SplitAttributes on if there is need to split page entry.
+**/
+PAGE_ATTRIBUTE
+NeedSplitPage (
+  IN PHYSICAL_ADDRESS  BaseAddress,
+  IN UINT64            Length,
+  IN PAGE_ATTRIBUTE    PageAttribute
+  )
+{
+  UINT64  EntryLength;
+
+  EntryLength = PageAttributeToLength (PageAttribute);
+
+  //
+  // The range fully covers the current entry - no split needed.
+  //
+  if (((BaseAddress & (EntryLength - 1)) == 0) && (Length >= EntryLength)) {
+    return PageNone;
+  }
+
+  //
+  // A 2 MB-aligned range of at least 2 MB can use 2 MB entries;
+  // anything smaller or misaligned must go down to 4 KB.
+  //
+  if (((BaseAddress & PAGING_2M_MASK) == 0) && (Length >= SIZE_2MB)) {
+    return Page2M;
+  }
+
+  return Page4K;
+}
+
+/**
+  This function splits one page entry to smaller page entries.
+
+  @param[in]  VtdUnitInfo       The VTd engine unit information.
+  @param[in]  PageEntry         The page entry to be split.
+  @param[in]  PageAttribute     The page attribute of the page entry.
+  @param[in]  SplitAttribute    How to split the page entry.
+
+  @retval RETURN_SUCCESS            The page entry is split.
+  @retval RETURN_UNSUPPORTED        The page entry does not support to be split.
+  @retval RETURN_OUT_OF_RESOURCES   No resource to split page entry.
+**/
+RETURN_STATUS
+SplitSecondLevelPage (
+  IN VTD_UNIT_INFO *VtdUnitInfo,
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY *PageEntry,
+  IN PAGE_ATTRIBUTE PageAttribute,
+  IN PAGE_ATTRIBUTE SplitAttribute
+  )
+{
+  UINT64 BaseAddress;
+  UINT64 *NewPageEntry;
+  UINTN Index;
+
+  ASSERT (PageAttribute == Page2M || PageAttribute == Page1G);
+
+  if (PageAttribute == Page2M) {
+    //
+    // Split 2M to 4K
+    //
+    ASSERT (SplitAttribute == Page4K);
+    if (SplitAttribute == Page4K) {
+      NewPageEntry = AllocateZeroPages (1);
+      DEBUG ((DEBUG_INFO, "Split - 0x%x\n", NewPageEntry));
+      if (NewPageEntry == NULL) {
+        return RETURN_OUT_OF_RESOURCES;
+      }
+      //
+      // Populate 512 4K entries carrying over the R/W/EMT/TM bits of the
+      // original 2M entry, flush them, and only then repoint the parent
+      // entry - keeps the table consistent for concurrent hardware walks.
+      //
+      BaseAddress = PageEntry->Uint64 & PAGING_2M_ADDRESS_MASK_64;
+      for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
+        NewPageEntry[Index] = (BaseAddress + SIZE_4KB * Index) | (PageEntry->Uint64 & PAGE_PROGATE_BITS);
+      }
+      FlushPageTableMemory (VtdUnitInfo, (UINTN)NewPageEntry, SIZE_4KB);
+
+      PageEntry->Uint64 = (UINT64)(UINTN)NewPageEntry;
+      SetSecondLevelPagingEntryAttribute (PageEntry, EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+      FlushPageTableMemory (VtdUnitInfo, (UINTN)PageEntry, sizeof(*PageEntry));
+      return RETURN_SUCCESS;
+    } else {
+      return RETURN_UNSUPPORTED;
+    }
+  } else if (PageAttribute == Page1G) {
+    //
+    // Split 1G to 2M
+    // No need support 1G->4K directly, we should use 1G->2M, then 2M->4K to get more compact page table.
+    //
+    ASSERT (SplitAttribute == Page2M || SplitAttribute == Page4K);
+    if ((SplitAttribute == Page2M || SplitAttribute == Page4K)) {
+      NewPageEntry = AllocateZeroPages (1);
+      DEBUG ((DEBUG_INFO, "Split - 0x%x\n", NewPageEntry));
+      if (NewPageEntry == NULL) {
+        return RETURN_OUT_OF_RESOURCES;
+      }
+      //
+      // Populate 512 2M entries (PS bit set) carrying over the propagated
+      // bits, flush, then repoint the parent entry.
+      //
+      BaseAddress = PageEntry->Uint64 & PAGING_1G_ADDRESS_MASK_64;
+      for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
+        NewPageEntry[Index] = (BaseAddress + SIZE_2MB * Index) | VTD_PG_PS | (PageEntry->Uint64 & PAGE_PROGATE_BITS);
+      }
+      FlushPageTableMemory (VtdUnitInfo, (UINTN)NewPageEntry, SIZE_4KB);
+
+      PageEntry->Uint64 = (UINT64)(UINTN)NewPageEntry;
+      SetSecondLevelPagingEntryAttribute (PageEntry, EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+      FlushPageTableMemory (VtdUnitInfo, (UINTN)PageEntry, sizeof(*PageEntry));
+      return RETURN_SUCCESS;
+    } else {
+      return RETURN_UNSUPPORTED;
+    }
+  } else {
+    return RETURN_UNSUPPORTED;
+  }
+}
+
+/**
+  Set VTd attribute for a system memory on second level page entry
+
+  @param[in]  VtdUnitInfo             The VTd engine unit information.
+  @param[in]  SecondLevelPagingEntry  The second level paging entry in VTd table for the device.
+  @param[in]  BaseAddress             The base of device memory address to be used as the DMA memory.
+  @param[in]  Length                  The length of device memory address to be used as the DMA memory.
+  @param[in]  IoMmuAccess             The IOMMU access.
+
+  @retval EFI_SUCCESS     The IoMmuAccess is set for the memory range specified by
+                          BaseAddress and Length (also returned when Length is 0).
+  @retval EFI_UNSUPPORTED BaseAddress or Length is not aligned to SIZE_4KB, a page
+                          table entry could not be obtained or allocated, or a
+                          required page split failed.
+**/
+EFI_STATUS
+SetSecondLevelPagingAttribute (
+  IN VTD_UNIT_INFO *VtdUnitInfo,
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY *SecondLevelPagingEntry,
+  IN UINT64 BaseAddress,
+  IN UINT64 Length,
+  IN UINT64 IoMmuAccess
+  )
+{
+  VTD_SECOND_LEVEL_PAGING_ENTRY *PageEntry;
+  PAGE_ATTRIBUTE PageAttribute;
+  UINTN PageEntryLength;
+  PAGE_ATTRIBUTE SplitAttribute;
+  EFI_STATUS Status;
+  BOOLEAN IsEntryModified;
+
+  DEBUG ((DEBUG_INFO, "SetSecondLevelPagingAttribute (0x%016lx - 0x%016lx : %x) \n", BaseAddress, Length, IoMmuAccess));
+  DEBUG ((DEBUG_INFO, "  SecondLevelPagingEntry Base - 0x%x\n", SecondLevelPagingEntry));
+
+  if (BaseAddress != ALIGN_VALUE(BaseAddress, SIZE_4KB)) {
+    DEBUG ((DEBUG_ERROR, "SetSecondLevelPagingAttribute - Invalid Alignment\n"));
+    return EFI_UNSUPPORTED;
+  }
+  if (Length != ALIGN_VALUE(Length, SIZE_4KB)) {
+    DEBUG ((DEBUG_ERROR, "SetSecondLevelPagingAttribute - Invalid Alignment\n"));
+    return EFI_UNSUPPORTED;
+  }
+
+  while (Length != 0) {
+    PageEntry = GetSecondLevelPageTableEntry (VtdUnitInfo, SecondLevelPagingEntry, BaseAddress, &PageAttribute);
+    if (PageEntry == NULL) {
+      DEBUG ((DEBUG_ERROR, "PageEntry - NULL\n"));
+      return RETURN_UNSUPPORTED;
+    }
+    PageEntryLength = PageAttributeToLength (PageAttribute);
+    SplitAttribute = NeedSplitPage (BaseAddress, Length, PageAttribute);
+    if (SplitAttribute == PageNone) {
+      ConvertSecondLevelPageEntryAttribute (VtdUnitInfo, PageEntry, IoMmuAccess, &IsEntryModified);
+      //
+      // Convert success, move to next
+      //
+      BaseAddress += PageEntryLength;
+      Length -= PageEntryLength;
+    } else {
+      Status = SplitSecondLevelPage (VtdUnitInfo, PageEntry, PageAttribute, SplitAttribute);
+      if (RETURN_ERROR (Status)) {
+        DEBUG ((DEBUG_ERROR, "SplitSecondLevelPage - %r\n", Status));
+        return RETURN_UNSUPPORTED;
+      }
+      //
+      // Just split current page
+      // Convert success in next around
+      //
+    }
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Create Fixed Second Level Paging Entry.
+
+  Builds a not-present mapping for [0, 4GB), then opens read/write access for
+  the DMA buffer range published in the DMA buffer info HOB.
+
+  @param[in]  VtdUnitInfo       The VTd engine unit information.
+
+  @retval EFI_SUCCESS           Setup translation table successfully.
+  @retval EFI_OUT_OF_RESOURCES  Setup translation table fail.
+
+**/
+EFI_STATUS
+CreateFixedSecondLevelPagingEntry (
+  IN VTD_UNIT_INFO *VtdUnitInfo
+  )
+{
+  EFI_STATUS Status;
+  UINT64 IoMmuAccess;
+  UINT64 BaseAddress;
+  UINT64 Length;
+  VOID *Hob;
+  DMA_BUFFER_INFO *DmaBufferInfo;
+
+  if (VtdUnitInfo->FixedSecondLevelPagingEntry != 0) {
+    return EFI_SUCCESS;
+  }
+
+  VtdUnitInfo->FixedSecondLevelPagingEntry = (UINTN) CreateSecondLevelPagingEntryTable (VtdUnitInfo, NULL, 0, SIZE_4GB, 0);
+  if (VtdUnitInfo->FixedSecondLevelPagingEntry == 0) {
+    DEBUG ((DEBUG_ERROR, "FixedSecondLevelPagingEntry is empty\n"));
+    return EFI_OUT_OF_RESOURCES;
+  }
+
+  Hob = GetFirstGuidHob (&mDmaBufferInfoGuid);
+  if (Hob == NULL) {
+    //
+    // The DMA buffer info HOB is expected to exist by this point; fail
+    // instead of dereferencing a NULL HOB pointer.
+    //
+    DEBUG ((DEBUG_ERROR, "DMA buffer info HOB is missing\n"));
+    ASSERT (FALSE);
+    return EFI_OUT_OF_RESOURCES;
+  }
+  DmaBufferInfo = GET_GUID_HOB_DATA (Hob);
+  BaseAddress = DmaBufferInfo->DmaBufferBase;
+  Length = DmaBufferInfo->DmaBufferSize;
+  IoMmuAccess = EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE;
+
+  DEBUG ((DEBUG_INFO, "  BaseAddress = 0x%lx\n", BaseAddress));
+  DEBUG ((DEBUG_INFO, "  Length = 0x%lx\n", Length));
+  DEBUG ((DEBUG_INFO, "  IoMmuAccess = 0x%lx\n", IoMmuAccess));
+
+  Status = SetSecondLevelPagingAttribute (VtdUnitInfo, (VTD_SECOND_LEVEL_PAGING_ENTRY*) VtdUnitInfo->FixedSecondLevelPagingEntry, BaseAddress, Length, IoMmuAccess);
+
+  return Status;
+}
+
+/**
+  Setup VTd translation table.
+
+  For every VTd engine that is not yet done, creates the fixed second level
+  page table and the (scalable / extended / legacy) context entries that
+  reference it, chosen from the engine's extended capability bits.
+
+  @param[in]  VTdInfo           The VTd engine context information.
+
+  @retval EFI_SUCCESS           Setup translation table successfully.
+  @retval EFI_OUT_OF_RESOURCES  Setup translation table fail.
+
+**/
+EFI_STATUS
+SetupTranslationTable (
+  IN VTD_INFO *VTdInfo
+  )
+{
+  EFI_STATUS Status;
+  UINTN Index;
+  VTD_UNIT_INFO *VtdUnitInfo;
+
+  for (Index = 0; Index < VTdInfo->VTdEngineCount; Index++) {
+    VtdUnitInfo = &VTdInfo->VtdUnitInfo[Index];
+    if (VtdUnitInfo->Done) {
+      continue;
+    }
+
+    Status = CreateFixedSecondLevelPagingEntry (VtdUnitInfo);
+    if (EFI_ERROR (Status)) {
+      DEBUG ((DEBUG_ERROR, "CreateFixedSecondLevelPagingEntry failed - %r\n", Status));
+      return Status;
+    }
+
+    if (VtdUnitInfo->ECapReg.Bits.SMTS) {
+      if (VtdUnitInfo->ECapReg.Bits.DEP_24) {
+        //
+        // With scalable mode support, bit 24 is reserved and must be zero.
+        //
+        DEBUG ((DEBUG_ERROR,"ECapReg.bit24 is not zero\n"));
+        ASSERT(FALSE);
+        Status = EFI_UNSUPPORTED;
+      } else {
+        Status = CreateContextEntry (VtdUnitInfo);
+      }
+    } else {
+      if (VtdUnitInfo->ECapReg.Bits.DEP_24) {
+        //
+        // To be compatible with previous VTd engines,
+        // it was the ECS (Extended Context Support) bit.
+        //
+        Status = CreateExtContextEntry (VtdUnitInfo);
+      } else {
+        Status = CreateContextEntry (VtdUnitInfo);
+      }
+    }
+
+    if (EFI_ERROR (Status)) {
+      return Status;
+    }
+  }
+  return EFI_SUCCESS;
+}
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Include/Guid/VtdLogDataHob.h b/Silicon/Intel/IntelSiliconPkg/Include/Guid/VtdLogDataHob.h
new file mode 100644
index 000000000..cfddce995
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Include/Guid/VtdLogDataHob.h
@@ -0,0 +1,151 @@
+/** @file
+ The definition for VTD Log Data Hob.
+
+ Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+**/
+
+
+#ifndef _VTD_LOG_DATA_HOB_H_
+#define _VTD_LOG_DATA_HOB_H_
+
+#include <IndustryStandard/Vtd.h>
+
+#define VTDLOG_LOG_TYPE(_id_) ((UINT64) 1 << (_id_)) // bitmask for a VTDLOG_EVENT_TYPE id
+
+typedef enum {
+  VTDLOG_PEI_BASIC                         = 0,  // Start ID for PEI basic log
+  VTDLOG_PEI_PRE_MEM_DMA_PROTECT           = 1,  // PRE-MEM phase
+  VTDLOG_PEI_PMR_LOW_MEMORY_RANGE          = 2,
+  VTDLOG_PEI_PMR_HIGH_MEMORY_RANGE         = 3,
+  VTDLOG_PEI_PROTECT_MEMORY_RANGE          = 4,
+  VTDLOG_PEI_POST_MEM_ENABLE_DMA_PROTECT   = 5,
+  VTDLOG_PEI_POST_MEM_DISABLE_DMA_PROTECT  = 6,
+  VTDLOG_PEI_QUEUED_INVALIDATION           = 7,
+  VTDLOG_PEI_REGISTER                      = 8,
+  VTDLOG_PEI_VTD_ERROR                     = 9,
+
+  VTDLOG_PEI_ADVANCED                      = 16, // Start ID for PEI advanced log
+  VTDLOG_PEI_PPI_ALLOC_BUFFER              = 17,
+  VTDLOG_PEI_PPI_MAP                       = 18,
+
+  VTDLOG_DXE_BASIC                         = 24, // Start ID for DXE basic log
+  VTDLOG_DXE_DMAR_TABLE                    = 25,
+  VTDLOG_DXE_SETUP_VTD                     = 26,
+  VTDLOG_DXE_PCI_DEVICE                    = 27,
+  VTDLOG_DXE_REGISTER                      = 28,
+  VTDLOG_DXE_ENABLE_DMAR                   = 29,
+  VTDLOG_DXE_DISABLE_DMAR                  = 30,
+  VTDLOG_DXE_DISABLE_PMR                   = 31,
+  VTDLOG_DXE_INSTALL_IOMMU_PROTOCOL        = 32,
+  VTDLOG_DXE_QUEUED_INVALIDATION           = 33,
+
+  VTDLOG_DXE_ADVANCED                      = 44, // Start ID for DXE advanced log
+  VTDLOG_DXE_IOMMU_ALLOC_BUFFER            = 45,
+  VTDLOG_DXE_IOMMU_FREE_BUFFER             = 46,
+  VTDLOG_DXE_IOMMU_MAP                     = 47,
+  VTDLOG_DXE_IOMMU_UNMAP                   = 48,
+  VTDLOG_DXE_IOMMU_SET_ATTRIBUTE           = 49,
+  VTDLOG_DXE_ROOT_TABLE                    = 50,
+} VTDLOG_EVENT_TYPE;
+
+#define VTD_LOG_PEI_PRE_MEM_BAR_MAX 8
+
+//
+// Code of VTDLOG_PEI_BASIC / VTDLOG_DXE_BASIC
+//
+#define VTD_LOG_ERROR_BUFFER_FULL (1<<0)
+
+//
+// Code of VTDLOG_PEI_PRE_MEM_DMA_PROTECT (Mode values, see VTDLOG_PEI_PRE_MEM_INFO)
+//
+#define VTD_LOG_PEI_PRE_MEM_NOT_USED 0
+#define VTD_LOG_PEI_PRE_MEM_DISABLE 1
+#define VTD_LOG_PEI_PRE_MEM_ADM 2
+#define VTD_LOG_PEI_PRE_MEM_TE 3
+#define VTD_LOG_PEI_PRE_MEM_PMR 4
+
+//
+// Code of VTDLOG_PEI_QUEUED_INVALIDATION
+//
+#define VTD_LOG_QI_DISABLE 0
+#define VTD_LOG_QI_ENABLE 1
+#define VTD_LOG_QI_ERROR_OUT_OF_RESOURCES 2
+
+//
+// Code of VTDLOG_PEI_VTD_ERROR
+//
+#define VTD_LOG_PEI_VTD_ERROR_PPI_ALLOC 1
+#define VTD_LOG_PEI_VTD_ERROR_PPI_MAP 2
+
+// Code of VTDLOG_PEI_REGISTER / VTDLOG_DXE_REGISTER
+#define VTDLOG_REGISTER_ALL 0
+#define VTDLOG_REGISTER_THIN 1
+#define VTDLOG_REGISTER_QI 2
+
+#pragma pack(1)
+
+//
+// Item head
+//
+typedef struct {
+  UINT32 DataSize;  // total size of this log item in bytes
+  UINT64 LogType;   // VTDLOG_LOG_TYPE() bitmask value
+  UINT64 Timestamp;
+}VTDLOG_EVENT_HEADER;
+
+//
+// Struct for type = VTDLOG_PEI_REGISTER
+//                   VTDLOG_DXE_REGISTER
+//                   VTDLOG_DXE_DMAR_TABLE
+//                   VTDLOG_DXE_IOMMU_SET_ATTRIBUTE
+//                   VTDLOG_DXE_PCI_DEVICE
+//                   VTDLOG_DXE_ROOT_TABLE
+//
+typedef struct {
+  VTDLOG_EVENT_HEADER Header;
+  UINT64 Param;
+  UINT8 Data[1]; // variable-length payload; presumably sized via Header.DataSize - TODO confirm
+} VTDLOG_EVENT_CONTEXT;
+
+//
+// Struct for rest of the types
+//
+typedef struct {
+  VTDLOG_EVENT_HEADER Header;
+  UINT64 Data1;
+  UINT64 Data2;
+}VTDLOG_EVENT_2PARAM;
+
+//
+// Struct for VTd log event
+//
+typedef union{
+  VTDLOG_EVENT_HEADER EventHeader;
+  VTDLOG_EVENT_2PARAM CommenEvent;  // sic: "Commen" (common) - name is part of the API
+  VTDLOG_EVENT_CONTEXT ContextEvent;
+} VTDLOG_EVENT;
+
+//
+// Information for PEI pre-memory phase
+//
+typedef struct {
+  UINT8 Mode;       // one of VTD_LOG_PEI_PRE_MEM_*
+  UINT8 Status;
+  UINT32 BarAddress;
+} VTDLOG_PEI_PRE_MEM_INFO;
+
+//
+// Buffer struct for PEI phase
+//
+typedef struct {
+  UINT8 VtdLogPeiError;
+  VTDLOG_PEI_PRE_MEM_INFO PreMemInfo[VTD_LOG_PEI_PRE_MEM_BAR_MAX];
+  UINT32 PostMemBufferUsed;
+  UINT64 PostMemBuffer;
+} VTDLOG_PEI_BUFFER_HOB;
+
+#pragma pack()
+
+#endif // _VTD_LOG_DATA_HOB_H_
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Include/Library/IntelVTdPeiDxeLib.h b/Silicon/Intel/IntelSiliconPkg/Include/Library/IntelVTdPeiDxeLib.h
new file mode 100644
index 000000000..c0a137a77
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Include/Library/IntelVTdPeiDxeLib.h
@@ -0,0 +1,329 @@
+/** @file
+ Intel VTd library definitions.
+
+ Copyright (c) 2023 Intel Corporation. All rights reserved. <BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+**/
+#ifndef _INTEL_VTD_PEI_DXE_LIB_H_
+#define _INTEL_VTD_PEI_DXE_LIB_H_
+
+//
+// Include files
+//
+#include <Uefi/UefiBaseType.h>
+#include <Library/DebugLib.h>
+#include <Protocol/VtdLog.h>
+#include <Protocol/PlatformVtdPolicy.h>
+
+#if defined (EXT_CALLBACK)
+  #define _VTDLIB_DEBUG(PrintLevel, ...) \
+    do { \
+      VtdLogEventCallback (Context, CallbackHandle, PrintLevel, ##__VA_ARGS__); \
+    } while (FALSE)
+  #define VTDLIB_DEBUG(Expression) _VTDLIB_DEBUG Expression  // routes DEBUG-style ((Level, Fmt, ...)) args to the callback
+#else
+  #define VTDLIB_DEBUG(Expression) DEBUG(Expression)
+#endif
+
+#pragma pack(1)
+
+typedef struct {
+  UINT8 DeviceType;
+  VTD_SOURCE_ID PciSourceId;
+  EDKII_PLATFORM_VTD_PCI_DEVICE_ID PciDeviceId;
+  // for statistical analysis
+  UINT64 AccessCount;
+} PCI_DEVICE_DATA;
+
+typedef struct {
+  BOOLEAN IncludeAllFlag;
+  UINT16 Segment;
+  UINT32 PciDeviceDataMaxNumber;
+  UINT32 PciDeviceDataNumber;
+  PCI_DEVICE_DATA PciDeviceData[1]; // variable length; PciDeviceDataNumber entries are valid
+} PCI_DEVICE_INFORMATION;
+
+typedef struct {
+  UINT64 Uint64Lo;
+  UINT64 Uint64Hi;
+}VTD_UINT128;
+
+typedef struct {
+  UINT64 BaseAddress;
+  UINT32 VerReg;
+  UINT64 CapReg;
+  UINT64 EcapReg;
+  UINT32 GstsReg;
+  UINT64 RtaddrReg;
+  UINT64 CcmdReg;
+  UINT32 FstsReg;
+  UINT32 FectlReg;
+  UINT32 FedataReg;
+  UINT32 FeaddrReg;
+  UINT32 FeuaddrReg;
+  UINT64 IqercdReg;
+  UINT64 IvaReg;
+  UINT64 IotlbReg;
+  UINT16 FrcdRegNum; // Number of FRCD Registers
+  VTD_UINT128 FrcdReg[1]; // variable length; FrcdRegNum entries
+} VTD_REGESTER_INFO; // NOTE(review): "REGESTER" misspelling is part of the public API name
+
+typedef struct {
+  UINT64 BaseAddress;
+  UINT32 FstsReg;
+  UINT64 IqercdReg;
+} VTD_REGESTER_QI_INFO;
+
+typedef struct {
+  UINT64 BaseAddress;
+  UINT32 GstsReg;
+  UINT64 RtaddrReg;
+  UINT32 FstsReg;
+  UINT32 FectlReg;
+  UINT64 IqercdReg;
+  UINT16 FrcdRegNum; // Number of FRCD Registers
+  VTD_UINT128 FrcdReg[1]; // variable length; FrcdRegNum entries
+} VTD_REGESTER_THIN_INFO;
+
+typedef struct {
+  VTD_SOURCE_ID SourceId;
+  EFI_PHYSICAL_ADDRESS DeviceAddress;
+  UINT64 Length;
+  UINT64 IoMmuAccess;
+  EFI_STATUS Status;
+} VTD_PROTOCOL_SET_ATTRIBUTE;
+
+typedef struct {
+  UINT64 BaseAddress;
+  UINT64 TableAddress;
+  BOOLEAN Is5LevelPaging;
+} VTD_ROOT_TABLE_INFO;
+
+#pragma pack()
+
+/**
+ Callback function of VTd lib handle strings.
+
+ @param[in] Context Context
+ @param[in] ErrorLevel The error level of the debug message.
+ @param[in] Buffer Event string
+**/
+typedef
+VOID
+(EFIAPI *EDKII_VTD_LIB_STRING_CB) (
+ IN VOID *Context,
+ IN UINTN ErrorLevel,
+ IN CHAR8 *Buffer
+ );
+
+/**
+ Dump DMAR ACPI table.
+
+ @param[in] Context Event Context
+ @param[in out] CallbackHandle Callback Handler
+ @param[in] Dmar DMAR ACPI table
+**/
+VOID
+VtdLibDumpAcpiDmar (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN EFI_ACPI_DMAR_HEADER *Dmar
+ );
+
+/**
+ Dump DRHD DMAR ACPI table.
+
+ @param[in] Context Event Context
+ @param[in out] CallbackHandle Callback Handler
+ @param[in] Dmar DMAR ACPI table
+**/
+VOID
+VtdLibDumpAcpiDmarDrhd (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN EFI_ACPI_DMAR_HEADER *Dmar
+ );
+
+/**
+ Dump the PCI device information managed by this VTd engine.
+
+ @param[in] Context Event Context
+ @param[in out] CallbackHandle Callback Handler
+ @param[in] PciDeviceInfo PCI device information
+**/
+VOID
+VtdLibDumpPciDeviceInfo (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN PCI_DEVICE_INFORMATION *PciDeviceInfo
+ );
+
+/**
+ Dump DMAR context entry table.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] RootEntry DMAR root entry.
+ @param[in] Is5LevelPaging If it is the 5 level paging.
+**/
+VOID
+VtdLibDumpDmarContextEntryTable (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTD_ROOT_ENTRY *RootEntry,
+ IN BOOLEAN Is5LevelPaging
+ );
+
+/**
+ Dump DMAR extended context entry table.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] ExtRootEntry DMAR extended root entry.
+ @param[in] Is5LevelPaging If it is the 5 level paging.
+**/
+VOID
+VtdLibDumpDmarExtContextEntryTable (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTD_EXT_ROOT_ENTRY *ExtRootEntry,
+ IN BOOLEAN Is5LevelPaging
+ );
+
+/**
+ Dump VTd registers.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] VtdRegInfo Registers Information
+**/
+VOID
+VtdLibDumpVtdRegsAll (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTD_REGESTER_INFO *VtdRegInfo
+ );
+
+/**
+ Dump VTd registers.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] VtdRegInfo Registers Information
+**/
+VOID
+VtdLibDumpVtdRegsThin (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTD_REGESTER_THIN_INFO *VtdRegInfo
+ );
+
+/**
+ Decode log event.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] Event Event struct
+
+ @retval TRUE Decode event success
+ @retval FALSE Unknown event
+**/
+BOOLEAN
+VtdLibDecodeEvent (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTDLOG_EVENT *Event
+ );
+
+/**
+ Flush VTd engine write buffer.
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+**/
+VOID
+VtdLibFlushWriteBuffer (
+ IN UINTN VtdUnitBaseAddress
+ );
+
+/**
+ Clear Global Command Register Bits
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+ @param[in] BitMask Bit mask.
+**/
+VOID
+VtdLibClearGlobalCommandRegisterBits (
+ IN UINTN VtdUnitBaseAddress,
+ IN UINT32 BitMask
+ );
+
+/**
+ Set Global Command Register Bits
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+ @param[in] BitMask Bit mask.
+**/
+VOID
+VtdLibSetGlobalCommandRegisterBits (
+ IN UINTN VtdUnitBaseAddress,
+ IN UINT32 BitMask
+ );
+
+/**
+ Disable DMAR translation.
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+
+ @retval EFI_SUCCESS DMAR translation is disabled.
+**/
+EFI_STATUS
+VtdLibDisableDmar (
+ IN UINTN VtdUnitBaseAddress
+ );
+
+/**
+ Disable PMR.
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+
+ @retval EFI_SUCCESS PMR is disabled.
+ @retval EFI_UNSUPPORTED PMR is not supported.
+ @retval EFI_NOT_STARTED PMR was not enabled.
+**/
+EFI_STATUS
+VtdLibDisablePmr (
+ IN UINTN VtdUnitBaseAddress
+ );
+
+/**
+ Disable queued invalidation interface.
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+**/
+VOID
+VtdLibDisableQueuedInvalidationInterface (
+ IN UINTN VtdUnitBaseAddress
+ );
+
+/**
+ Submit the queued invalidation descriptor to the remapping
+ hardware unit and wait for its completion.
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+ @param[in] Desc The invalidate descriptor
+ @param[in] ClearFaultBits Clear Error bits
+
+  @retval EFI_SUCCESS The operation was successful.
+  @retval EFI_INVALID_PARAMETER Parameter is invalid.
+  @retval EFI_DEVICE_ERROR A fault was detected (same value as RETURN_DEVICE_ERROR).
+                           If ClearFaultBits is FALSE, the caller must clear the fault bits.
+**/
+EFI_STATUS
+VtdLibSubmitQueuedInvalidationDescriptor (
+ IN UINTN VtdUnitBaseAddress,
+ IN VOID *Desc,
+ IN BOOLEAN ClearFaultBits
+ );
+
+#endif
diff --git a/Silicon/Intel/IntelSiliconPkg/Include/Protocol/VtdLog.h b/Silicon/Intel/IntelSiliconPkg/Include/Protocol/VtdLog.h
new file mode 100644
index 000000000..7c2894e81
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Include/Protocol/VtdLog.h
@@ -0,0 +1,59 @@
+/** @file
+ The definition for VTD Log.
+
+ Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef __VTD_LOG_PROTOCOL_H__
+#define __VTD_LOG_PROTOCOL_H__
+
+#include <Guid/VtdLogDataHob.h>
+
+#define EDKII_VTD_LOG_PROTOCOL_GUID \
+    { \
+      0x1e271819, 0xa3ca, 0x481f, { 0xbd, 0xff, 0x92, 0x78, 0x2f, 0x9a, 0x99, 0x3c } \
+    }
+
+typedef struct _EDKII_VTD_LOG_PROTOCOL EDKII_VTD_LOG_PROTOCOL;
+
+#define EDKII_VTD_LOG_PROTOCOL_REVISION 0x00010000
+
+/**
+  Callback function invoked for each VTd log event.
+
+  @param[in] Context Event context
+  @param[in] Header Event header
+
+**/
+typedef
+VOID
+(EFIAPI *EDKII_VTD_LOG_HANDLE_EVENT) (
+  IN VOID *Context,
+  IN VTDLOG_EVENT_HEADER *Header
+  );
+
+/**
+  Get the VTd log events.
+  @param[in] Context Event context
+  @param[in out] CallbackHandle Callback function for each VTd log event
+
+  @retval UINT64 Number of events
+**/
+typedef
+UINT64
+(EFIAPI *EDKII_VTD_LOG_GET_EVENTS) (
+  IN VOID *Context,
+  IN OUT EDKII_VTD_LOG_HANDLE_EVENT CallbackHandle
+  );
+
+struct _EDKII_VTD_LOG_PROTOCOL {
+  UINT64 Revision;
+  EDKII_VTD_LOG_GET_EVENTS GetEvents;
+};
+
+extern EFI_GUID gEdkiiVTdLogProtocolGuid;
+
+#endif
+
diff --git a/Silicon/Intel/IntelSiliconPkg/IntelSiliconPkg.dec b/Silicon/Intel/IntelSiliconPkg/IntelSiliconPkg.dec
index cad22acda..ec8690a8d 100644
--- a/Silicon/Intel/IntelSiliconPkg/IntelSiliconPkg.dec
+++ b/Silicon/Intel/IntelSiliconPkg/IntelSiliconPkg.dec
@@ -73,6 +73,9 @@
## HOB GUID to get memory information after MRC is done. The hob data will be used to set the PMR ranges
gVtdPmrInfoDataHobGuid = {0x6fb61645, 0xf168, 0x46be, { 0x80, 0xec, 0xb5, 0x02, 0x38, 0x5e, 0xe7, 0xe7 } }
+ ## HOB GUID to get VTd log data.
+ gVTdLogBufferHobGuid = {0xc8049121, 0xdf91, 0x4dfd, { 0xad, 0xcb, 0x1c, 0x55, 0x85, 0x09, 0x6d, 0x3b } }
+
## Include/Guid/MicrocodeShadowInfoHob.h
gEdkiiMicrocodeShadowInfoHobGuid = { 0x658903f9, 0xda66, 0x460d, { 0x8b, 0xb0, 0x9d, 0x2d, 0xdf, 0x65, 0x44, 0x59 } }
@@ -119,6 +122,8 @@
gPchSmmSpi2ProtocolGuid = { 0x2d1c0c43, 0x20d3, 0x40ae, { 0x99, 0x07, 0x2d, 0xf0, 0xe7, 0x91, 0x21, 0xa5 } }
gEdkiiPlatformVTdPolicyProtocolGuid = { 0x3d17e448, 0x466, 0x4e20, { 0x99, 0x9f, 0xb2, 0xe1, 0x34, 0x88, 0xee, 0x22 }}
+ gEdkiiVTdLogProtocolGuid = { 0x1e271819, 0xa3ca, 0x481f, { 0xbd, 0xff, 0x92, 0x78, 0x2f, 0x9a, 0x99, 0x3c }}
+
gIntelDieInfoProtocolGuid = { 0xAED8A0A1, 0xFDE6, 0x4CF2, { 0xA3, 0x85, 0x08, 0xF1, 0x25, 0xF2, 0x40, 0x37 }}
## Protocol for device security policy.
@@ -207,3 +212,19 @@
# non-zero: The size of an additional NVS region following the Regular variable region.<BR>
# @Prompt Additional NVS Region Size.
gIntelSiliconPkgTokenSpaceGuid.PcdFlashNvStorageAdditionalSize|0x00000000|UINT32|0x0000000F
+
+ ## Declares VTd LOG Output Level.<BR><BR>
+ # 0 : Disable VTd Log
+ # 1 : Enable Basic Log
+ # 2 : Enable All Log
+ # @Prompt The VTd Log Output Level.
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdLogLevel|0x02|UINT8|0x00000017
+
+ ## Declares VTd PEI POST-MEM LOG buffer size.<BR><BR>
+  # @Prompt The VTd PEI Post-Mem Log buffer size. Default is 8 KB (0x2000).
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdPeiPostMemLogBufferSize|0x00002000|UINT32|0x00000019
+
+ ## Declares VTd DXE LOG buffer size.<BR><BR>
+  # @Prompt The VTd DXE Log buffer size. Default is 4 MB (0x400000).
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdDxeLogBufferSize|0x00400000|UINT32|0x0000001A
+
diff --git a/Silicon/Intel/IntelSiliconPkg/IntelSiliconPkg.dsc b/Silicon/Intel/IntelSiliconPkg/IntelSiliconPkg.dsc
index 170eb480a..c8ff40b38 100644
--- a/Silicon/Intel/IntelSiliconPkg/IntelSiliconPkg.dsc
+++ b/Silicon/Intel/IntelSiliconPkg/IntelSiliconPkg.dsc
@@ -45,6 +45,7 @@
UefiBootServicesTableLib|MdePkg/Library/UefiBootServicesTableLib/UefiBootServicesTableLib.inf
UefiDriverEntryPoint|MdePkg/Library/UefiDriverEntryPoint/UefiDriverEntryPoint.inf
VariableFlashInfoLib|MdeModulePkg/Library/BaseVariableFlashInfoLib/BaseVariableFlashInfoLib.inf
+ IntelVTdPeiDxeLib|IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLib.inf
[LibraryClasses.common.PEIM]
PeimEntryPoint|MdePkg/Library/PeimEntryPoint/PeimEntryPoint.inf
diff --git a/Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLib.c b/Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLib.c
new file mode 100644
index 000000000..d56d899c5
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLib.c
@@ -0,0 +1,1810 @@
+/** @file
+ Source code file for Intel VTd PEI DXE library.
+
+Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/PrintLib.h>
+#include <Library/IoLib.h>
+#include <Library/CacheMaintenanceLib.h>
+#include <Library/IntelVTdPeiDxeLib.h>
+#include <IndustryStandard/Vtd.h>
+
+//
+// Define the maximum message length that this library supports
+//
+#define MAX_STRING_LENGTH (0x100)
+
+#define VTD_64BITS_ADDRESS(Lo, Hi) (LShiftU64 (Lo, 12) | LShiftU64 (Hi, 32))
+
+/**
+  Produces a Null-terminated ASCII string in an output buffer based on a Null-terminated
+  ASCII format string and variable argument list.
+
+  @param[in] Context Event context
+  @param[in out] CallbackHandle Callback handler
+  @param[in] ErrorLevel The error level of the debug message.
+  @param[in] FormatString A Null-terminated ASCII format string.
+  @param[in] ... Variable argument list whose contents are accessed based on the format string specified by FormatString.
+
+  @return The number of ASCII characters in the produced output buffer not including the
+          Null-terminator.
+**/
+UINTN
+EFIAPI
+VtdLogEventCallback (
+  IN VOID *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+  IN UINTN ErrorLevel,
+  IN CONST CHAR8 *FormatString,
+  ...
+  )
+{
+  CHAR8 Buffer[MAX_STRING_LENGTH]; // formatted text; AsciiVSPrint truncates longer output
+  VA_LIST Marker;
+  UINTN NumberOfPrinted;
+
+  if ((CallbackHandle == NULL) || (FormatString == NULL)) { // nothing to do without a sink or a format
+    return 0;
+  }
+
+  VA_START (Marker, FormatString);
+  NumberOfPrinted = AsciiVSPrint (Buffer, sizeof (Buffer), FormatString, Marker);
+  VA_END (Marker);
+
+  if (NumberOfPrinted > 0) { // only forward non-empty output
+    CallbackHandle (Context, ErrorLevel, Buffer);
+  }
+
+  return NumberOfPrinted;
+}
+
+/**
+  Dump DMAR DeviceScopeEntry.
+
+  Prints the entry header fields, a human-readable name for the entry type,
+  and each (Device, Function) pair of the trailing PCI path.
+
+  @param[in] Context Event context
+  @param[in out] CallbackHandle Callback handler
+  @param[in] DmarDeviceScopeEntry DMAR DeviceScopeEntry
+**/
+VOID
+VtdLibDumpDmarDeviceScopeEntry (
+  IN VOID *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+  IN EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *DmarDeviceScopeEntry
+  )
+{
+  UINTN PciPathNumber;
+  UINTN PciPathIndex;
+  EFI_ACPI_DMAR_PCI_PATH *PciPath;
+
+  if (DmarDeviceScopeEntry == NULL) {
+    return;
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    *************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    *       DMA-Remapping Device Scope Entry Structure                      *\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    *************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    (sizeof(UINTN) == sizeof(UINT64)) ?
+    "    DMAR Device Scope Entry address ...................... 0x%016lx\n" :
+    "    DMAR Device Scope Entry address ...................... 0x%08x\n",
+    DmarDeviceScopeEntry
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "      Device Scope Entry Type ............................ 0x%02x\n",
+    DmarDeviceScopeEntry->Type
+    ));
+  switch (DmarDeviceScopeEntry->Type) {
+  case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_ENDPOINT:
+    VTDLIB_DEBUG ((DEBUG_INFO,
+      "        PCI Endpoint Device\n"
+      ));
+    break;
+  case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_BRIDGE:
+    //
+    // Fixed typo in the emitted text: "Sub-hierachy" -> "Sub-hierarchy".
+    //
+    VTDLIB_DEBUG ((DEBUG_INFO,
+      "        PCI Sub-hierarchy\n"
+      ));
+    break;
+  case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_IOAPIC:
+    VTDLIB_DEBUG ((DEBUG_INFO,
+      "        IOAPIC\n"
+      ));
+    break;
+  case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_MSI_CAPABLE_HPET:
+    VTDLIB_DEBUG ((DEBUG_INFO,
+      "        MSI Capable HPET\n"
+      ));
+    break;
+  case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_ACPI_NAMESPACE_DEVICE:
+    VTDLIB_DEBUG ((DEBUG_INFO,
+      "        ACPI Namespace Device\n"
+      ));
+    break;
+  default:
+    break;
+  }
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "      Length ............................................. 0x%02x\n",
+    DmarDeviceScopeEntry->Length
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "      Enumeration ID ..................................... 0x%02x\n",
+    DmarDeviceScopeEntry->EnumerationId
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "      Starting Bus Number ................................ 0x%02x\n",
+    DmarDeviceScopeEntry->StartBusNumber
+    ));
+
+  PciPathNumber = (DmarDeviceScopeEntry->Length - sizeof(EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER)) / sizeof(EFI_ACPI_DMAR_PCI_PATH); // bytes after the fixed header are (Device, Function) pairs
+  PciPath = (EFI_ACPI_DMAR_PCI_PATH *)(DmarDeviceScopeEntry + 1);
+  for (PciPathIndex = 0; PciPathIndex < PciPathNumber; PciPathIndex++) {
+    VTDLIB_DEBUG ((DEBUG_INFO,
+      "      Device ............................................. 0x%02x\n",
+      PciPath[PciPathIndex].Device
+      ));
+    VTDLIB_DEBUG ((DEBUG_INFO,
+      "      Function ........................................... 0x%02x\n",
+      PciPath[PciPathIndex].Function
+      ));
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    *************************************************************************\n\n"
+    ));
+}
+
+/**
+  Dump DMAR SIDP table.
+
+  @param[in] Context Event context
+  @param[in out] CallbackHandle Callback handler
+  @param[in] Sidp DMAR SIDP table
+**/
+VOID
+VtdLibDumpDmarSidp (
+  IN VOID *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+  IN EFI_ACPI_DMAR_SIDP_HEADER *Sidp
+  )
+{
+  EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *DmarDeviceScopeEntry;
+  INTN SidpLen;
+
+  if (Sidp == NULL) {
+    return;
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  ***************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  *       SoC Integrated Device Property Reporting Structure               *\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  ***************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    (sizeof(UINTN) == sizeof(UINT64)) ?
+    "  SIDP address ........................................... 0x%016lx\n" :
+    "  SIDP address ........................................... 0x%08x\n",
+    Sidp
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    Type ................................................. 0x%04x\n",
+    Sidp->Header.Type
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    Length ............................................... 0x%04x\n",
+    Sidp->Header.Length
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    Segment Number ....................................... 0x%04x\n",
+    Sidp->SegmentNumber
+    ));
+
+  SidpLen  = Sidp->Header.Length - sizeof(EFI_ACPI_DMAR_SIDP_HEADER);
+  DmarDeviceScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)(Sidp + 1);
+  while (SidpLen > 0) { // walk the variable-length device scope entries
+    VtdLibDumpDmarDeviceScopeEntry (Context, CallbackHandle, DmarDeviceScopeEntry);
+    SidpLen -= DmarDeviceScopeEntry->Length; // NOTE(review): a zero Length would never terminate this loop; DMAR input is trusted here
+    DmarDeviceScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)((UINTN)DmarDeviceScopeEntry + DmarDeviceScopeEntry->Length);
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  ***************************************************************************\n\n"
+    ));
+}
+
+/**
+  Dump DMAR SATC table.
+
+  @param[in] Context Event context
+  @param[in out] CallbackHandle Callback handler
+  @param[in] Satc DMAR SATC table
+**/
+VOID
+VtdLibDumpDmarSatc (
+  IN VOID *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+  IN EFI_ACPI_DMAR_SATC_HEADER *Satc
+  )
+{
+  EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *DmarDeviceScopeEntry;
+  INTN SatcLen;
+
+  if (Satc == NULL) {
+    return;
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  ***************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  *       ACPI Soc Integrated Address Translation Cache reporting Structure *\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  ***************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    (sizeof(UINTN) == sizeof(UINT64)) ?
+    "  SATC address ........................................... 0x%016lx\n" :
+    "  SATC address ........................................... 0x%08x\n",
+    Satc
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    Type ................................................. 0x%04x\n",
+    Satc->Header.Type
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    Length ............................................... 0x%04x\n",
+    Satc->Header.Length
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    Flags ................................................ 0x%02x\n",
+    Satc->Flags
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    Segment Number ....................................... 0x%04x\n",
+    Satc->SegmentNumber
+    ));
+
+  SatcLen  = Satc->Header.Length - sizeof(EFI_ACPI_DMAR_SATC_HEADER);
+  DmarDeviceScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)(Satc + 1);
+  while (SatcLen > 0) { // walk the variable-length device scope entries
+    VtdLibDumpDmarDeviceScopeEntry (Context, CallbackHandle, DmarDeviceScopeEntry);
+    SatcLen -= DmarDeviceScopeEntry->Length; // NOTE(review): assumes Length != 0 (trusted ACPI input)
+    DmarDeviceScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)((UINTN)DmarDeviceScopeEntry + DmarDeviceScopeEntry->Length);
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  ***************************************************************************\n\n"
+    ));
+}
+
+/**
+  Dump DMAR ANDD table.
+
+  @param[in] Context Event context
+  @param[in out] CallbackHandle Callback handler
+  @param[in] Andd DMAR ANDD table
+**/
+VOID
+VtdLibDumpDmarAndd (
+  IN VOID *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+  IN EFI_ACPI_DMAR_ANDD_HEADER *Andd
+  )
+{
+  if (Andd == NULL) {
+    return;
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  ***************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  *       ACPI Name-space Device Declaration Structure                     *\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  ***************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    (sizeof(UINTN) == sizeof(UINT64)) ?
+    "  ANDD address ........................................... 0x%016lx\n" :
+    "  ANDD address ........................................... 0x%08x\n",
+    Andd
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    Type ................................................. 0x%04x\n",
+    Andd->Header.Type
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    Length ............................................... 0x%04x\n",
+    Andd->Header.Length
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    ACPI Device Number ................................... 0x%02x\n",
+    Andd->AcpiDeviceNumber
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    ACPI Object Name ..................................... '%a'\n",
+    (Andd + 1) // the NUL-terminated ACPI object name immediately follows the ANDD header
+    ));
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  ***************************************************************************\n\n"
+    ));
+}
+
+/**
+  Dump DMAR RHSA table.
+
+  @param[in] Context Event context
+  @param[in out] CallbackHandle Callback handler
+  @param[in] Rhsa DMAR RHSA table
+**/
+VOID
+VtdLibDumpDmarRhsa (
+  IN VOID *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+  IN EFI_ACPI_DMAR_RHSA_HEADER *Rhsa
+  )
+{
+  if (Rhsa == NULL) {
+    return;
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  ***************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  *       Remapping Hardware Status Affinity Structure                     *\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  ***************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    (sizeof(UINTN) == sizeof(UINT64)) ?
+    "  RHSA address ........................................... 0x%016lx\n" :
+    "  RHSA address ........................................... 0x%08x\n",
+    Rhsa
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    Type ................................................. 0x%04x\n",
+    Rhsa->Header.Type
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    Length ............................................... 0x%04x\n",
+    Rhsa->Header.Length
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    Register Base Address ................................ 0x%016lx\n",
+    Rhsa->RegisterBaseAddress
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    Proximity Domain ..................................... 0x%08x\n",
+    Rhsa->ProximityDomain
+    ));
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  ***************************************************************************\n\n"
+    ));
+}
+
+/**
+  Dump DMAR ATSR table.
+
+  @param[in] Context Event context
+  @param[in out] CallbackHandle Callback handler
+  @param[in] Atsr DMAR ATSR table
+**/
+VOID
+VtdLibDumpDmarAtsr (
+  IN VOID *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+  IN EFI_ACPI_DMAR_ATSR_HEADER *Atsr
+  )
+{
+  EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *DmarDeviceScopeEntry;
+  INTN AtsrLen;
+
+  if (Atsr == NULL) {
+    return;
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  ***************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  *       Root Port ATS Capability Reporting Structure                     *\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  ***************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    (sizeof(UINTN) == sizeof(UINT64)) ?
+    "  ATSR address ........................................... 0x%016lx\n" :
+    "  ATSR address ........................................... 0x%08x\n",
+    Atsr
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    Type ................................................. 0x%04x\n",
+    Atsr->Header.Type
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    Length ............................................... 0x%04x\n",
+    Atsr->Header.Length
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    Flags ................................................ 0x%02x\n",
+    Atsr->Flags
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "      ALL_PORTS .......................................... 0x%02x\n",
+    Atsr->Flags & EFI_ACPI_DMAR_ATSR_FLAGS_ALL_PORTS // non-zero when ATS applies to all root ports of the segment
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    Segment Number ....................................... 0x%04x\n",
+    Atsr->SegmentNumber
+    ));
+
+  AtsrLen  = Atsr->Header.Length - sizeof(EFI_ACPI_DMAR_ATSR_HEADER);
+  DmarDeviceScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)(Atsr + 1);
+  while (AtsrLen > 0) { // walk the variable-length device scope entries
+    VtdLibDumpDmarDeviceScopeEntry (Context, CallbackHandle, DmarDeviceScopeEntry);
+    AtsrLen -= DmarDeviceScopeEntry->Length; // NOTE(review): assumes Length != 0 (trusted ACPI input)
+    DmarDeviceScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)((UINTN)DmarDeviceScopeEntry + DmarDeviceScopeEntry->Length);
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  ***************************************************************************\n\n"
+    ));
+}
+
+/**
+  Dump DMAR RMRR table.
+
+  @param[in] Context Event context
+  @param[in out] CallbackHandle Callback handler
+  @param[in] Rmrr DMAR RMRR table
+**/
+VOID
+VtdLibDumpDmarRmrr (
+  IN VOID *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+  IN EFI_ACPI_DMAR_RMRR_HEADER *Rmrr
+  )
+{
+  EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *DmarDeviceScopeEntry;
+  INTN RmrrLen;
+
+  if (Rmrr == NULL) {
+    return;
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  ***************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  *       Reserved Memory Region Reporting Structure                       *\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  ***************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    (sizeof(UINTN) == sizeof(UINT64)) ?
+    "  RMRR address ........................................... 0x%016lx\n" :
+    "  RMRR address ........................................... 0x%08x\n",
+    Rmrr
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    Type ................................................. 0x%04x\n",
+    Rmrr->Header.Type
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    Length ............................................... 0x%04x\n",
+    Rmrr->Header.Length
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    Segment Number ....................................... 0x%04x\n",
+    Rmrr->SegmentNumber
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    Reserved Memory Region Base Address .................. 0x%016lx\n",
+    Rmrr->ReservedMemoryRegionBaseAddress
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "    Reserved Memory Region Limit Address ................. 0x%016lx\n",
+    Rmrr->ReservedMemoryRegionLimitAddress
+    ));
+
+  RmrrLen  = Rmrr->Header.Length - sizeof(EFI_ACPI_DMAR_RMRR_HEADER);
+  DmarDeviceScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)(Rmrr + 1);
+  while (RmrrLen > 0) { // walk the variable-length device scope entries
+    VtdLibDumpDmarDeviceScopeEntry (Context, CallbackHandle, DmarDeviceScopeEntry);
+    RmrrLen -= DmarDeviceScopeEntry->Length; // NOTE(review): assumes Length != 0 (trusted ACPI input)
+    DmarDeviceScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)((UINTN)DmarDeviceScopeEntry + DmarDeviceScopeEntry->Length);
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "  ***************************************************************************\n\n"
+    ));
+}
+
+/**
+ Dump DMAR DRHD table.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] Drhd DMAR DRHD table
+**/
+VOID
+VtdLibDumpDmarDrhd (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN EFI_ACPI_DMAR_DRHD_HEADER *Drhd
+ )
+{
+ EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *DmarDeviceScopeEntry;
+ INTN DrhdLen;
+
+ //
+ // Nothing to dump for a NULL table pointer.
+ //
+ if (Drhd == NULL) {
+ return;
+ }
+
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " ***************************************************************************\n"
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " * DMA-Remapping Hardware Definition Structure *\n"
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " ***************************************************************************\n"
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ (sizeof(UINTN) == sizeof(UINT64)) ?
+ " DRHD address ........................................... 0x%016lx\n" :
+ " DRHD address ........................................... 0x%08x\n",
+ Drhd
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Type ................................................. 0x%04x\n",
+ Drhd->Header.Type
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Length ............................................... 0x%04x\n",
+ Drhd->Header.Length
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Flags ................................................ 0x%02x\n",
+ Drhd->Flags
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " INCLUDE_PCI_ALL .................................... 0x%02x\n",
+ Drhd->Flags & EFI_ACPI_DMAR_DRHD_FLAGS_INCLUDE_PCI_ALL
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Size ................................................. 0x%02x\n",
+ Drhd->Size
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Segment Number ....................................... 0x%04x\n",
+ Drhd->SegmentNumber
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Register Base Address ................................ 0x%016lx\n",
+ Drhd->RegisterBaseAddress
+ ));
+
+ //
+ // Walk the variable-length device scope entries that follow the fixed
+ // DRHD header (signed length guards against a truncated last entry).
+ //
+ DrhdLen = Drhd->Header.Length - sizeof(EFI_ACPI_DMAR_DRHD_HEADER);
+ DmarDeviceScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)(Drhd + 1);
+ while (DrhdLen > 0) {
+ VtdLibDumpDmarDeviceScopeEntry (Context, CallbackHandle, DmarDeviceScopeEntry);
+ DrhdLen -= DmarDeviceScopeEntry->Length;
+ DmarDeviceScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)((UINTN)DmarDeviceScopeEntry + DmarDeviceScopeEntry->Length);
+ }
+
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " ***************************************************************************\n\n"
+ ));
+}
+
+/**
+ Dump Header of DMAR ACPI table.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] Dmar DMAR ACPI table
+**/
+VOID
+VtdLibDumpAcpiDmarHeader (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN EFI_ACPI_DMAR_HEADER *Dmar
+ )
+{
+ //
+ // Dump Dmar table
+ //
+ // NOTE(review): unlike the other dump helpers, Dmar is dereferenced
+ // without a NULL check - callers (VtdLibDumpAcpiDmar and
+ // VtdLibDumpAcpiDmarDrhd) guard against NULL before calling here.
+ //
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ "*****************************************************************************\n"
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ "* DMAR Table *\n"
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ "*****************************************************************************\n"
+ ));
+
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ (sizeof(UINTN) == sizeof(UINT64)) ?
+ "DMAR address ............................................. 0x%016lx\n" :
+ "DMAR address ............................................. 0x%08x\n",
+ Dmar
+ ));
+
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Table Contents:\n"
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Host Address Width ................................... 0x%02x\n",
+ Dmar->HostAddressWidth
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Flags ................................................ 0x%02x\n",
+ Dmar->Flags
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " INTR_REMAP ......................................... 0x%02x\n",
+ Dmar->Flags & EFI_ACPI_DMAR_FLAGS_INTR_REMAP
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " X2APIC_OPT_OUT_SET ................................. 0x%02x\n",
+ Dmar->Flags & EFI_ACPI_DMAR_FLAGS_X2APIC_OPT_OUT
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " DMA_CTRL_PLATFORM_OPT_IN_FLAG ...................... 0x%02x\n",
+ Dmar->Flags & EFI_ACPI_DMAR_FLAGS_DMA_CTRL_PLATFORM_OPT_IN_FLAG
+ ));
+}
+
+/**
+ Dump DMAR ACPI table.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] Dmar DMAR ACPI table
+**/
+VOID
+VtdLibDumpAcpiDmar (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN EFI_ACPI_DMAR_HEADER *Dmar
+ )
+{
+ EFI_ACPI_DMAR_STRUCTURE_HEADER *DmarHeader;
+ INTN DmarLen;
+
+ if (Dmar == NULL) {
+ return;
+ }
+
+ //
+ // Dump Dmar table
+ //
+ VtdLibDumpAcpiDmarHeader (Context, CallbackHandle, Dmar);
+
+ //
+ // Iterate every remapping structure after the fixed DMAR header and
+ // dispatch on its Type; unrecognized types are silently skipped.
+ //
+ DmarLen = Dmar->Header.Length - sizeof(EFI_ACPI_DMAR_HEADER);
+ DmarHeader = (EFI_ACPI_DMAR_STRUCTURE_HEADER *)(Dmar + 1);
+ while (DmarLen > 0) {
+ switch (DmarHeader->Type) {
+ case EFI_ACPI_DMAR_TYPE_DRHD:
+ VtdLibDumpDmarDrhd (Context, CallbackHandle, (EFI_ACPI_DMAR_DRHD_HEADER *)DmarHeader);
+ break;
+ case EFI_ACPI_DMAR_TYPE_RMRR:
+ VtdLibDumpDmarRmrr (Context, CallbackHandle, (EFI_ACPI_DMAR_RMRR_HEADER *)DmarHeader);
+ break;
+ case EFI_ACPI_DMAR_TYPE_ATSR:
+ VtdLibDumpDmarAtsr (Context, CallbackHandle, (EFI_ACPI_DMAR_ATSR_HEADER *)DmarHeader);
+ break;
+ case EFI_ACPI_DMAR_TYPE_RHSA:
+ VtdLibDumpDmarRhsa (Context, CallbackHandle, (EFI_ACPI_DMAR_RHSA_HEADER *)DmarHeader);
+ break;
+ case EFI_ACPI_DMAR_TYPE_ANDD:
+ VtdLibDumpDmarAndd (Context, CallbackHandle, (EFI_ACPI_DMAR_ANDD_HEADER *)DmarHeader);
+ break;
+ case EFI_ACPI_DMAR_TYPE_SATC:
+ VtdLibDumpDmarSatc (Context, CallbackHandle, (EFI_ACPI_DMAR_SATC_HEADER *)DmarHeader);
+ break;
+ case EFI_ACPI_DMAR_TYPE_SIDP:
+ VtdLibDumpDmarSidp (Context, CallbackHandle, (EFI_ACPI_DMAR_SIDP_HEADER *)DmarHeader);
+ break;
+ default:
+ break;
+ }
+ DmarLen -= DmarHeader->Length;
+ DmarHeader = (EFI_ACPI_DMAR_STRUCTURE_HEADER *)((UINTN)DmarHeader + DmarHeader->Length);
+ }
+
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ "*****************************************************************************\n\n"
+ ));
+}
+
+/**
+ Dump DRHD DMAR ACPI table.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] Dmar DMAR ACPI table
+**/
+VOID
+VtdLibDumpAcpiDmarDrhd (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN EFI_ACPI_DMAR_HEADER *Dmar
+ )
+{
+ EFI_ACPI_DMAR_STRUCTURE_HEADER *DmarHeader;
+ INTN DmarLen;
+
+ if (Dmar == NULL) {
+ return;
+ }
+
+ //
+ // Dump Dmar table
+ //
+ VtdLibDumpAcpiDmarHeader (Context, CallbackHandle, Dmar);
+
+ //
+ // Same traversal as VtdLibDumpAcpiDmar, but only DRHD structures are
+ // dumped; every other remapping structure type is skipped.
+ //
+ DmarLen = Dmar->Header.Length - sizeof(EFI_ACPI_DMAR_HEADER);
+ DmarHeader = (EFI_ACPI_DMAR_STRUCTURE_HEADER *)(Dmar + 1);
+ while (DmarLen > 0) {
+ switch (DmarHeader->Type) {
+ case EFI_ACPI_DMAR_TYPE_DRHD:
+ VtdLibDumpDmarDrhd (Context, CallbackHandle, (EFI_ACPI_DMAR_DRHD_HEADER *)DmarHeader);
+ break;
+ default:
+ break;
+ }
+ DmarLen -= DmarHeader->Length;
+ DmarHeader = (EFI_ACPI_DMAR_STRUCTURE_HEADER *)((UINTN)DmarHeader + DmarHeader->Length);
+ }
+
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ "*****************************************************************************\n\n"
+ ));
+}
+
+/**
+ Dump the PCI device information managed by this VTd engine.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] PciDeviceInfo VTd Unit Information
+**/
+VOID
+VtdLibDumpPciDeviceInfo (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN PCI_DEVICE_INFORMATION *PciDeviceInfo
+ )
+{
+ UINTN Index;
+
+ //
+ // Silently accept NULL; print one S/B/D/F line per recorded device.
+ //
+ if (PciDeviceInfo != NULL) {
+ VTDLIB_DEBUG ((DEBUG_INFO, "PCI Device Information (Number 0x%x, IncludeAll - %d):\n",
+ PciDeviceInfo->PciDeviceDataNumber,
+ PciDeviceInfo->IncludeAllFlag
+ ));
+ for (Index = 0; Index < PciDeviceInfo->PciDeviceDataNumber; Index++) {
+ VTDLIB_DEBUG ((DEBUG_INFO, " S%04x B%02x D%02x F%02x\n",
+ PciDeviceInfo->Segment,
+ PciDeviceInfo->PciDeviceData[Index].PciSourceId.Bits.Bus,
+ PciDeviceInfo->PciDeviceData[Index].PciSourceId.Bits.Device,
+ PciDeviceInfo->PciDeviceData[Index].PciSourceId.Bits.Function
+ ));
+ }
+ }
+}
+
+/**
+ Dump DMAR second level paging entry.
+
+ @param[in] Context Event context
+ @param[in] CallbackHandle Callback handler
+ @param[in] SecondLevelPagingEntry The second level paging entry.
+ @param[in] Is5LevelPaging If it is the 5 level paging.
+**/
+VOID
+VtdLibDumpSecondLevelPagingEntry (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VOID *SecondLevelPagingEntry,
+ IN BOOLEAN Is5LevelPaging
+ )
+{
+ UINTN Index5;
+ UINTN Index4;
+ UINTN Index3;
+ UINTN Index2;
+ UINTN Index1;
+ UINTN Lvl5IndexEnd;
+ VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl5PtEntry;
+ VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl4PtEntry;
+ VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl3PtEntry;
+ VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl2PtEntry;
+ VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl1PtEntry;
+
+ VTDLIB_DEBUG ((DEBUG_VERBOSE, "================\n"));
+ VTDLIB_DEBUG ((DEBUG_VERBOSE, "DMAR Second Level Page Table:\n"));
+ VTDLIB_DEBUG ((DEBUG_VERBOSE, "SecondLevelPagingEntry Base - 0x%x, Is5LevelPaging - %d\n", SecondLevelPagingEntry, Is5LevelPaging));
+
+ //
+ // With 4-level paging the outer loop runs exactly once and the input
+ // table is treated directly as the level-4 table; with 5-level paging
+ // the input is the level-5 table and all 512 entries are scanned.
+ // Only non-zero entries are printed; non-present (zero) entries are
+ // skipped without descending.
+ //
+ Lvl5IndexEnd = Is5LevelPaging ? SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY) : 1;
+ Lvl4PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)SecondLevelPagingEntry;
+ Lvl5PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)SecondLevelPagingEntry;
+
+ for (Index5 = 0; Index5 < Lvl5IndexEnd; Index5++) {
+ if (Is5LevelPaging) {
+ if (Lvl5PtEntry[Index5].Uint64 != 0) {
+ VTDLIB_DEBUG ((DEBUG_VERBOSE, " Lvl5Pt Entry(0x%03x) - 0x%016lx\n", Index5, Lvl5PtEntry[Index5].Uint64));
+ }
+ if (Lvl5PtEntry[Index5].Uint64 == 0) {
+ continue;
+ }
+ Lvl4PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(Lvl5PtEntry[Index5].Bits.AddressLo, Lvl5PtEntry[Index5].Bits.AddressHi);
+ }
+
+ for (Index4 = 0; Index4 < SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY); Index4++) {
+ if (Lvl4PtEntry[Index4].Uint64 != 0) {
+ VTDLIB_DEBUG ((DEBUG_VERBOSE, " Lvl4Pt Entry(0x%03x) - 0x%016lx\n", Index4, Lvl4PtEntry[Index4].Uint64));
+ }
+ if (Lvl4PtEntry[Index4].Uint64 == 0) {
+ continue;
+ }
+ Lvl3PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(Lvl4PtEntry[Index4].Bits.AddressLo, Lvl4PtEntry[Index4].Bits.AddressHi);
+ for (Index3 = 0; Index3 < SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY); Index3++) {
+ if (Lvl3PtEntry[Index3].Uint64 != 0) {
+ VTDLIB_DEBUG ((DEBUG_VERBOSE, " Lvl3Pt Entry(0x%03x) - 0x%016lx\n", Index3, Lvl3PtEntry[Index3].Uint64));
+ }
+ if (Lvl3PtEntry[Index3].Uint64 == 0) {
+ continue;
+ }
+
+ Lvl2PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(Lvl3PtEntry[Index3].Bits.AddressLo, Lvl3PtEntry[Index3].Bits.AddressHi);
+ for (Index2 = 0; Index2 < SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY); Index2++) {
+ if (Lvl2PtEntry[Index2].Uint64 != 0) {
+ VTDLIB_DEBUG ((DEBUG_VERBOSE, " Lvl2Pt Entry(0x%03x) - 0x%016lx\n", Index2, Lvl2PtEntry[Index2].Uint64));
+ }
+ if (Lvl2PtEntry[Index2].Uint64 == 0) {
+ continue;
+ }
+ //
+ // PageSize == 0 means the level-2 entry points to a level-1
+ // table; otherwise it maps a large page and there is no
+ // level-1 table to walk.
+ //
+ if (Lvl2PtEntry[Index2].Bits.PageSize == 0) {
+ Lvl1PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(Lvl2PtEntry[Index2].Bits.AddressLo, Lvl2PtEntry[Index2].Bits.AddressHi);
+ for (Index1 = 0; Index1 < SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY); Index1++) {
+ if (Lvl1PtEntry[Index1].Uint64 != 0) {
+ VTDLIB_DEBUG ((DEBUG_VERBOSE, " Lvl1Pt Entry(0x%03x) - 0x%016lx\n", Index1, Lvl1PtEntry[Index1].Uint64));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ VTDLIB_DEBUG ((DEBUG_VERBOSE, "================\n"));
+}
+
+/**
+ Dump DMAR context entry table.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] RootEntry DMAR root entry.
+ @param[in] Is5LevelPaging If it is the 5 level paging.
+**/
+VOID
+VtdLibDumpDmarContextEntryTable (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTD_ROOT_ENTRY *RootEntry,
+ IN BOOLEAN Is5LevelPaging
+ )
+{
+ UINTN Index;
+ UINTN Index2;
+ VTD_CONTEXT_ENTRY *ContextEntry;
+
+ VTDLIB_DEBUG ((DEBUG_INFO, "=========================\n"));
+ VTDLIB_DEBUG ((DEBUG_INFO, "DMAR Context Entry Table:\n"));
+
+ VTDLIB_DEBUG ((DEBUG_INFO, "RootEntry Address - 0x%x\n", RootEntry));
+
+ //
+ // One root entry per bus; for each present root entry, walk its
+ // context table (one context entry per device/function, Index2
+ // encodes D:F as Index2 >> 3 : Index2 & 0x7) and descend into the
+ // second level page table of every present context entry.
+ //
+ for (Index = 0; Index < VTD_ROOT_ENTRY_NUMBER; Index++) {
+ if ((RootEntry[Index].Uint128.Uint64Lo != 0) || (RootEntry[Index].Uint128.Uint64Hi != 0)) {
+ VTDLIB_DEBUG ((DEBUG_INFO, " RootEntry(0x%02x) B%02x - 0x%016lx %016lx\n",
+ Index, Index, RootEntry[Index].Uint128.Uint64Hi, RootEntry[Index].Uint128.Uint64Lo));
+ }
+ if (RootEntry[Index].Bits.Present == 0) {
+ continue;
+ }
+ ContextEntry = (VTD_CONTEXT_ENTRY *) (UINTN) VTD_64BITS_ADDRESS (RootEntry[Index].Bits.ContextTablePointerLo, RootEntry[Index].Bits.ContextTablePointerHi);
+ for (Index2 = 0; Index2 < VTD_CONTEXT_ENTRY_NUMBER; Index2++) {
+ if ((ContextEntry[Index2].Uint128.Uint64Lo != 0) || (ContextEntry[Index2].Uint128.Uint64Hi != 0)) {
+ VTDLIB_DEBUG ((DEBUG_INFO, " ContextEntry(0x%02x) D%02xF%02x - 0x%016lx %016lx\n",
+ Index2, Index2 >> 3, Index2 & 0x7, ContextEntry[Index2].Uint128.Uint64Hi, ContextEntry[Index2].Uint128.Uint64Lo));
+ }
+ if (ContextEntry[Index2].Bits.Present == 0) {
+ continue;
+ }
+ VtdLibDumpSecondLevelPagingEntry (Context, CallbackHandle, (VOID *) (UINTN) VTD_64BITS_ADDRESS (ContextEntry[Index2].Bits.SecondLevelPageTranslationPointerLo, ContextEntry[Index2].Bits.SecondLevelPageTranslationPointerHi), Is5LevelPaging);
+ }
+ }
+ VTDLIB_DEBUG ((DEBUG_INFO, "=========================\n"));
+}
+
+/**
+ Dump DMAR extended context entry table.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] ExtRootEntry DMAR extended root entry.
+ @param[in] Is5LevelPaging If it is the 5 level paging.
+**/
+VOID
+VtdLibDumpDmarExtContextEntryTable (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTD_EXT_ROOT_ENTRY *ExtRootEntry,
+ IN BOOLEAN Is5LevelPaging
+ )
+{
+ UINTN Index;
+ UINTN Index2;
+ VTD_EXT_CONTEXT_ENTRY *ExtContextEntry;
+
+ VTDLIB_DEBUG ((DEBUG_INFO, "=========================\n"));
+ VTDLIB_DEBUG ((DEBUG_INFO, "DMAR ExtContext Entry Table:\n"));
+
+ VTDLIB_DEBUG ((DEBUG_INFO, "ExtRootEntry Address - 0x%x\n", ExtRootEntry));
+
+ //
+ // Each extended root entry splits its bus's 256 context entries into a
+ // lower half (D:F 0..127) and an upper half (D:F 128..255), each with
+ // its own present bit and context table pointer.
+ //
+ for (Index = 0; Index < VTD_ROOT_ENTRY_NUMBER; Index++) {
+ if ((ExtRootEntry[Index].Uint128.Uint64Lo != 0) || (ExtRootEntry[Index].Uint128.Uint64Hi != 0)) {
+ VTDLIB_DEBUG ((DEBUG_INFO, " ExtRootEntry(0x%02x) B%02x - 0x%016lx %016lx\n",
+ Index, Index, ExtRootEntry[Index].Uint128.Uint64Hi, ExtRootEntry[Index].Uint128.Uint64Lo));
+ }
+ if (ExtRootEntry[Index].Bits.LowerPresent == 0) {
+ continue;
+ }
+ ExtContextEntry = (VTD_EXT_CONTEXT_ENTRY *) (UINTN) VTD_64BITS_ADDRESS (ExtRootEntry[Index].Bits.LowerContextTablePointerLo, ExtRootEntry[Index].Bits.LowerContextTablePointerHi);
+ for (Index2 = 0; Index2 < VTD_CONTEXT_ENTRY_NUMBER/2; Index2++) {
+ if ((ExtContextEntry[Index2].Uint256.Uint64_1 != 0) || (ExtContextEntry[Index2].Uint256.Uint64_2 != 0) ||
+ (ExtContextEntry[Index2].Uint256.Uint64_3 != 0) || (ExtContextEntry[Index2].Uint256.Uint64_4 != 0)) {
+ VTDLIB_DEBUG ((DEBUG_INFO, " ExtContextEntryLower(0x%02x) D%02xF%02x - 0x%016lx %016lx %016lx %016lx\n",
+ Index2, Index2 >> 3, Index2 & 0x7, ExtContextEntry[Index2].Uint256.Uint64_4, ExtContextEntry[Index2].Uint256.Uint64_3, ExtContextEntry[Index2].Uint256.Uint64_2, ExtContextEntry[Index2].Uint256.Uint64_1));
+ }
+ if (ExtContextEntry[Index2].Bits.Present == 0) {
+ continue;
+ }
+ VtdLibDumpSecondLevelPagingEntry (Context, CallbackHandle, (VOID *) (UINTN) VTD_64BITS_ADDRESS (ExtContextEntry[Index2].Bits.SecondLevelPageTranslationPointerLo, ExtContextEntry[Index2].Bits.SecondLevelPageTranslationPointerHi), Is5LevelPaging);
+ }
+
+ if (ExtRootEntry[Index].Bits.UpperPresent == 0) {
+ continue;
+ }
+ ExtContextEntry = (VTD_EXT_CONTEXT_ENTRY *) (UINTN) VTD_64BITS_ADDRESS (ExtRootEntry[Index].Bits.UpperContextTablePointerLo, ExtRootEntry[Index].Bits.UpperContextTablePointerHi);
+ for (Index2 = 0; Index2 < VTD_CONTEXT_ENTRY_NUMBER/2; Index2++) {
+ if ((ExtContextEntry[Index2].Uint256.Uint64_1 != 0) || (ExtContextEntry[Index2].Uint256.Uint64_2 != 0) ||
+ (ExtContextEntry[Index2].Uint256.Uint64_3 != 0) || (ExtContextEntry[Index2].Uint256.Uint64_4 != 0)) {
+ VTDLIB_DEBUG ((DEBUG_INFO, " ExtContextEntryUpper(0x%02x) D%02xF%02x - 0x%016lx %016lx %016lx %016lx\n",
+ Index2, (Index2 + 128) >> 3, (Index2 + 128) & 0x7, ExtContextEntry[Index2].Uint256.Uint64_4, ExtContextEntry[Index2].Uint256.Uint64_3, ExtContextEntry[Index2].Uint256.Uint64_2, ExtContextEntry[Index2].Uint256.Uint64_1));
+ }
+ //
+ // NOTE(review): unlike the lower half, a present upper-half context
+ // entry is not followed into its second level page table (the loop
+ // body after this check is empty) - confirm whether this is
+ // intentional or a missing VtdLibDumpSecondLevelPagingEntry call.
+ //
+ if (ExtContextEntry[Index2].Bits.Present == 0) {
+ continue;
+ }
+ }
+ }
+ VTDLIB_DEBUG ((DEBUG_INFO, "=========================\n"));
+}
+
+/**
+ Dump VTd FRCD register.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] FrcdRegNum FRCD Register Number
+ @param[in] FrcdRegTab FRCD Register Table
+**/
+VOID
+VtdLibDumpVtdFrcdRegs (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN UINT16 FrcdRegNum,
+ IN VTD_UINT128 *FrcdRegTab
+ )
+{
+ UINT16 Index;
+ VTD_FRCD_REG FrcdReg;
+ VTD_SOURCE_ID SourceId;
+
+ //
+ // Print the raw value of every fault recording register; a non-zero
+ // register additionally gets its fault fields decoded (fault info,
+ // fault bit, source B/D/F, type and reason code).
+ //
+ for (Index = 0; Index < FrcdRegNum; Index++) {
+ FrcdReg.Uint64[0] = FrcdRegTab[Index].Uint64Lo;
+ FrcdReg.Uint64[1] = FrcdRegTab[Index].Uint64Hi;
+ VTDLIB_DEBUG ((DEBUG_INFO, " FRCD_REG[%d] - 0x%016lx %016lx\n", Index, FrcdReg.Uint64[1], FrcdReg.Uint64[0]));
+ if (FrcdReg.Uint64[1] != 0 || FrcdReg.Uint64[0] != 0) {
+ VTDLIB_DEBUG ((DEBUG_INFO, " Fault Info - 0x%016lx\n", VTD_64BITS_ADDRESS(FrcdReg.Bits.FILo, FrcdReg.Bits.FIHi)));
+ VTDLIB_DEBUG ((DEBUG_INFO, " Fault Bit - %d\n", FrcdReg.Bits.F));
+ SourceId.Uint16 = (UINT16)FrcdReg.Bits.SID;
+ VTDLIB_DEBUG ((DEBUG_INFO, " Source - B%02x D%02x F%02x\n", SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+ VTDLIB_DEBUG ((DEBUG_INFO, " Type - 0x%02x\n", (FrcdReg.Bits.T1 << 1) | FrcdReg.Bits.T2));
+ VTDLIB_DEBUG ((DEBUG_INFO, " Reason - %x (Refer to VTd Spec, Appendix A)\n", FrcdReg.Bits.FR));
+ }
+ }
+}
+
+/**
+ Dump VTd registers.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] VtdRegInfo Registers information
+**/
+VOID
+VtdLibDumpVtdRegsAll (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTD_REGESTER_INFO *VtdRegInfo
+ )
+{
+ //
+ // Full register snapshot: all captured registers plus the FRCD array.
+ // NULL input is silently ignored.
+ //
+ if (VtdRegInfo != NULL) {
+ VTDLIB_DEBUG ((DEBUG_INFO, "VTd Engine: [0x%016lx]\n", VtdRegInfo->BaseAddress));
+ VTDLIB_DEBUG ((DEBUG_INFO, " VER_REG - 0x%08x\n", VtdRegInfo->VerReg));
+ VTDLIB_DEBUG ((DEBUG_INFO, " CAP_REG - 0x%016lx\n", VtdRegInfo->CapReg));
+ VTDLIB_DEBUG ((DEBUG_INFO, " ECAP_REG - 0x%016lx\n", VtdRegInfo->EcapReg));
+ VTDLIB_DEBUG ((DEBUG_INFO, " GSTS_REG - 0x%08x \n", VtdRegInfo->GstsReg));
+ VTDLIB_DEBUG ((DEBUG_INFO, " RTADDR_REG - 0x%016lx\n", VtdRegInfo->RtaddrReg));
+ VTDLIB_DEBUG ((DEBUG_INFO, " CCMD_REG - 0x%016lx\n", VtdRegInfo->CcmdReg));
+ VTDLIB_DEBUG ((DEBUG_INFO, " FSTS_REG - 0x%08x\n", VtdRegInfo->FstsReg));
+ VTDLIB_DEBUG ((DEBUG_INFO, " FECTL_REG - 0x%08x\n", VtdRegInfo->FectlReg));
+ VTDLIB_DEBUG ((DEBUG_INFO, " FEDATA_REG - 0x%08x\n", VtdRegInfo->FedataReg));
+ VTDLIB_DEBUG ((DEBUG_INFO, " FEADDR_REG - 0x%08x\n", VtdRegInfo->FeaddrReg));
+ VTDLIB_DEBUG ((DEBUG_INFO, " FEUADDR_REG - 0x%08x\n", VtdRegInfo->FeuaddrReg));
+ VTDLIB_DEBUG ((DEBUG_INFO, " IQERCD_REG - 0x%016lx\n", VtdRegInfo->IqercdReg));
+
+ VtdLibDumpVtdFrcdRegs (Context, CallbackHandle, VtdRegInfo->FrcdRegNum, VtdRegInfo->FrcdReg);
+
+ VTDLIB_DEBUG ((DEBUG_INFO, " IVA_REG - 0x%016lx\n", VtdRegInfo->IvaReg));
+ VTDLIB_DEBUG ((DEBUG_INFO, " IOTLB_REG - 0x%016lx\n", VtdRegInfo->IotlbReg));
+ }
+}
+
+/**
+ Dump VTd registers.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] VtdRegInfo Registers information
+**/
+VOID
+VtdLibDumpVtdRegsThin (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTD_REGESTER_THIN_INFO *VtdRegInfo
+ )
+{
+ //
+ // Reduced register snapshot: only the status/control registers and the
+ // FRCD array. NULL input is silently ignored.
+ //
+ if (VtdRegInfo != NULL) {
+ VTDLIB_DEBUG ((DEBUG_INFO, "VTd Engine: [0x%016lx]\n", VtdRegInfo->BaseAddress));
+ VTDLIB_DEBUG ((DEBUG_INFO, " GSTS_REG - 0x%08x \n", VtdRegInfo->GstsReg));
+ VTDLIB_DEBUG ((DEBUG_INFO, " RTADDR_REG - 0x%016lx\n", VtdRegInfo->RtaddrReg));
+ VTDLIB_DEBUG ((DEBUG_INFO, " FSTS_REG - 0x%08x\n", VtdRegInfo->FstsReg));
+ VTDLIB_DEBUG ((DEBUG_INFO, " FECTL_REG - 0x%08x\n", VtdRegInfo->FectlReg));
+ VTDLIB_DEBUG ((DEBUG_INFO, " IQERCD_REG - 0x%016lx\n", VtdRegInfo->IqercdReg));
+
+ VtdLibDumpVtdFrcdRegs (Context, CallbackHandle, VtdRegInfo->FrcdRegNum, VtdRegInfo->FrcdReg);
+ }
+}
+
+/**
+ Dump the queued-invalidation related VTd registers (FSTS and IQERCD)
+ of one VTd engine.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] VtdRegInfo Registers information
+**/
+VOID
+VtdLibDumpVtdRegsQi (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTD_REGESTER_QI_INFO *VtdRegInfo
+ )
+{
+ //
+ // Guard clause: a NULL snapshot produces no output.
+ //
+ if (VtdRegInfo == NULL) {
+ return;
+ }
+
+ VTDLIB_DEBUG ((DEBUG_INFO, "VTd Engine: [0x%016lx]\n", VtdRegInfo->BaseAddress));
+ VTDLIB_DEBUG ((DEBUG_INFO, " FSTS_REG - 0x%08x\n", VtdRegInfo->FstsReg));
+ VTDLIB_DEBUG ((DEBUG_INFO, " IQERCD_REG - 0x%016lx\n", VtdRegInfo->IqercdReg));
+}
+
+/**
+ Dump Vtd PEI pre-mem event.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] Event VTDLOG_EVENT_2PARAM event
+
+**/
+VOID
+VtdLibDumpPeiPreMemInfo (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTDLOG_EVENT_2PARAM *Event
+ )
+{
+ UINT64 VtdBarAddress;
+ UINT64 Mode;
+ UINT64 Status;
+
+ //
+ // Data1 carries the VTd BAR; Data2 packs the pre-memory protection
+ // mode in bits [7:0] and its status code in bits [15:8].
+ //
+ VtdBarAddress = Event->Data1;
+ Mode = Event->Data2 & 0xFF;
+ Status = (Event->Data2>>8) & 0xFF;
+
+ switch (Mode) {
+ case VTD_LOG_PEI_PRE_MEM_DISABLE:
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI (pre-memory): Disabled [0x%016lx] 0x%x\n", VtdBarAddress, Status));
+ break;
+ case VTD_LOG_PEI_PRE_MEM_ADM:
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI (pre-memory): Enable Abort DMA Mode [0x%016lx] 0x%x\n", VtdBarAddress, Status));
+ break;
+ case VTD_LOG_PEI_PRE_MEM_TE:
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI (pre-memory): Enable NULL Root Entry Table [0x%016lx] 0x%x\n", VtdBarAddress, Status));
+ break;
+ case VTD_LOG_PEI_PRE_MEM_PMR:
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI (pre-memory): Enable PMR [0x%016lx] 0x%x\n", VtdBarAddress, Status));
+ break;
+ case VTD_LOG_PEI_PRE_MEM_NOT_USED:
+ //
+ // Not used
+ //
+ break;
+ default:
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI (pre-memory): Unknown [0x%016lx] 0x%x\n", VtdBarAddress, Status));
+ break;
+ }
+}
+
+/**
+ Dump Vtd Queued Invaildation event.
+
+ Data1 selects the sub-event; Data2 carries the VTd engine base address.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] Event VTDLOG_EVENT_2PARAM event
+
+**/
+VOID
+VtdLibDumpQueuedInvaildation (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTDLOG_EVENT_2PARAM *Event
+ )
+{
+ UINT64 SubEvent;
+
+ SubEvent = Event->Data1;
+
+ if (SubEvent == VTD_LOG_QI_DISABLE) {
+ VTDLIB_DEBUG ((DEBUG_INFO, " [0x%016lx] Disable\n", Event->Data2));
+ } else if (SubEvent == VTD_LOG_QI_ENABLE) {
+ VTDLIB_DEBUG ((DEBUG_INFO, " [0x%016lx] Enable\n", Event->Data2));
+ } else if (SubEvent == VTD_LOG_QI_ERROR_OUT_OF_RESOURCES) {
+ VTDLIB_DEBUG ((DEBUG_INFO, " [0x%016lx] error - Out of resources\n", Event->Data2));
+ } else {
+ VTDLIB_DEBUG ((DEBUG_INFO, " [0x%016lx] error - (0x%x)\n", Event->Data2, Event->Data1));
+ }
+}
+
+/**
+ Dump Vtd registers event.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] Event VTDLOG_EVENT_CONTEXT event
+
+**/
+VOID
+VtdLibDumpRegisters (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTDLOG_EVENT_CONTEXT *Event
+ )
+{
+ //
+ // Event->Param selects which snapshot layout Event->Data holds:
+ // full (ALL), reduced (THIN) or queued-invalidation only (QI).
+ //
+ switch (Event->Param) {
+ case VTDLOG_REGISTER_ALL:
+ VtdLibDumpVtdRegsAll (Context, CallbackHandle, (VTD_REGESTER_INFO *) Event->Data);
+ break;
+ case VTDLOG_REGISTER_THIN:
+ VtdLibDumpVtdRegsThin (Context, CallbackHandle, (VTD_REGESTER_THIN_INFO *) Event->Data);
+ break;
+ case VTDLOG_REGISTER_QI:
+ VtdLibDumpVtdRegsQi (Context, CallbackHandle, (VTD_REGESTER_QI_INFO *) Event->Data);
+ break;
+ default:
+ VTDLIB_DEBUG ((DEBUG_INFO, " Unknown format (%d)\n", Event->Param));
+ break;
+ }
+}
+
+/**
+ Dump Vtd PEI Error event.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] Event VTDLOG_EVENT_2PARAM event
+
+**/
+VOID
+VtdLibDumpPeiError (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTDLOG_EVENT_2PARAM *Event
+ )
+{
+ UINT64 Timestamp;
+
+ Timestamp = Event->Header.Timestamp;
+
+ //
+ // Data1 is the PEI error code; Data2 is the length that triggered it.
+ //
+ switch (Event->Data1) {
+ case VTD_LOG_PEI_VTD_ERROR_PPI_ALLOC:
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: Error - PPI alloc length [0x%016lx]\n", Timestamp, Event->Data2));
+ break;
+ case VTD_LOG_PEI_VTD_ERROR_PPI_MAP:
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: Error - PPI map length [0x%016lx]\n", Timestamp, Event->Data2));
+ break;
+ default:
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: Error - Unknown (%d) 0x%x\n", Timestamp, Event->Data1, Event->Data2));
+ break;
+ }
+}
+
+/**
+ Dump an IOMMU SetAttribute event.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] Event VTDLOG_EVENT_CONTEXT event
+
+**/
+VOID
+VtdLibDumpSetAttribute (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTDLOG_EVENT_CONTEXT *Event
+ )
+{
+ VTD_PROTOCOL_SET_ATTRIBUTE * SetAttributeInfo;
+
+ SetAttributeInfo = (VTD_PROTOCOL_SET_ATTRIBUTE *) Event->Data;
+
+ //
+ // The format string consumes six arguments. The original code omitted
+ // IoMmuAccess, which shifted Status into the IoMmuAccess field and fed
+ // %r an uninitialized vararg.
+ //
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: SetAttribute SourceId = 0x%04x, Address = 0x%lx, Length = 0x%lx, IoMmuAccess = 0x%lx, %r\n",
+ Event->Header.Timestamp,
+ SetAttributeInfo->SourceId.Uint16,
+ SetAttributeInfo->DeviceAddress,
+ SetAttributeInfo->Length,
+ SetAttributeInfo->IoMmuAccess,
+ SetAttributeInfo->Status));
+}
+
+
+
+/**
+ Dump Vtd Root Table event.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] Event VTDLOG_EVENT_CONTEXT event
+
+**/
+VOID
+VtdLibDumpRootTable (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTDLOG_EVENT_CONTEXT *Event
+ )
+{
+ VTD_ROOT_TABLE_INFO *RootTableInfo;
+
+ //
+ // Event->Param selects the table format: 0 = legacy root entry table,
+ // 1 = extended root entry table; anything else is reported as unknown.
+ //
+ RootTableInfo = (VTD_ROOT_TABLE_INFO *) Event->Data;
+ if (Event->Param == 0) {
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Root Entry Table [0x%016lx]\n", Event->Header.Timestamp, RootTableInfo->BaseAddress));
+ VtdLibDumpDmarContextEntryTable (Context, CallbackHandle, (VTD_ROOT_ENTRY *) (UINTN) RootTableInfo->TableAddress, RootTableInfo->Is5LevelPaging);
+
+ } else if (Event->Param == 1) {
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Ext Root Entry Table [0x%016lx]\n", Event->Header.Timestamp, RootTableInfo->BaseAddress));
+ VtdLibDumpDmarExtContextEntryTable (Context, CallbackHandle, (VTD_EXT_ROOT_ENTRY *) (UINTN) RootTableInfo->TableAddress, RootTableInfo->Is5LevelPaging);
+
+ } else {
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Unknown Root Table Type (%d)\n", Event->Header.Timestamp, Event->Param));
+ }
+}
+
+/**
+ Decode log event.
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] Event Event struct
+
+ @retval TRUE Decode event success
+ @retval FALSE Unknown event
+**/
+BOOLEAN
+VtdLibDecodeEvent (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTDLOG_EVENT *Event
+ )
+{
+ BOOLEAN Result;
+ UINT64 Timestamp;
+ UINT64 Data1;
+ UINT64 Data2;
+
+ Result = TRUE;
+ Timestamp = Event->EventHeader.Timestamp;
+ Data1 = Event->CommenEvent.Data1;
+ Data2 = Event->CommenEvent.Data2;
+
+ switch (Event->EventHeader.LogType) {
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_BASIC):
+ if (Data1 & VTD_LOG_ERROR_BUFFER_FULL) {
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: Info : Log Buffer Full\n", Timestamp));
+ Data1 &= ~VTD_LOG_ERROR_BUFFER_FULL;
+ }
+ if (Data1 != 0) {
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: Info : 0x%x, 0x%x\n", Timestamp, Data1, Data2));
+ }
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_PRE_MEM_DMA_PROTECT):
+ VtdLibDumpPeiPreMemInfo (Context, CallbackHandle, &(Event->CommenEvent));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_PMR_LOW_MEMORY_RANGE):
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: PMR Low Memory Range [0x%x, 0x%x]\n", Timestamp, Data1, Data2));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_PMR_HIGH_MEMORY_RANGE):
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: PMR High Memory Range [0x%016lx, 0x%016lx]\n", Timestamp, Data1, Data2));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_PROTECT_MEMORY_RANGE):
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: Protected DMA Memory Range [0x%016lx, 0x%016lx]\n", Timestamp, Data1, Data2));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_POST_MEM_ENABLE_DMA_PROTECT):
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: Enable DMA protection [0x%016lx] %r\n", Timestamp, Data1, Data2));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_POST_MEM_DISABLE_DMA_PROTECT):
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: Disable DMA protection [0x%016lx]\n", Timestamp, Data1));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_QUEUED_INVALIDATION):
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: Queued Invalidation", Timestamp));
+ VtdLibDumpQueuedInvaildation (Context, CallbackHandle, &(Event->CommenEvent));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_REGISTER):
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: Dump Registers\n", Timestamp));
+ VtdLibDumpRegisters (Context, CallbackHandle, &(Event->ContextEvent));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_VTD_ERROR):
+ VtdLibDumpPeiError (Context, CallbackHandle, &(Event->CommenEvent));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_PPI_ALLOC_BUFFER):
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: PPI AllocateBuffer 0x%x, Length = 0x%x\n", Timestamp, Data1, Data2));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_PPI_MAP):
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: PPI Map 0x%x, Length = 0x%x\n", Timestamp, Data1, Data2));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_BASIC):
+ if (Data1 & VTD_LOG_ERROR_BUFFER_FULL) {
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Info : Log Buffer Full\n", Timestamp));
+ Data1 &= ~VTD_LOG_ERROR_BUFFER_FULL;
+ }
+ if (Data1 != 0) {
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Info : 0x%x, 0x%x\n", Timestamp, Data1, Data2));
+ }
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_DMAR_TABLE):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: DMAR Table\n", Timestamp));
+ VtdLibDumpAcpiDmar (Context, CallbackHandle, (EFI_ACPI_DMAR_HEADER *) Event->ContextEvent.Data);
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_SETUP_VTD):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Setup VTd Below/Above 4G Memory Limit = [0x%016lx, 0x%016lx]\n", Timestamp, Data1, Data2));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_PCI_DEVICE):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: PCI Devices [0x%016lx]\n", Timestamp, Event->ContextEvent.Param));
+ VtdLibDumpPciDeviceInfo (Context, CallbackHandle, (PCI_DEVICE_INFORMATION *) Event->ContextEvent.Data);
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_REGISTER):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Dump Registers\n", Timestamp));
+ VtdLibDumpRegisters (Context, CallbackHandle, &(Event->ContextEvent));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_ENABLE_DMAR):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Enable DMAR [0x%016lx]\n", Timestamp, Data1));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_DISABLE_DMAR):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Disable DMAR [0x%016lx]\n", Timestamp, Data1));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_DISABLE_PMR):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Disable PMR [0x%016lx] %r\n", Timestamp, Data1, Data2));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_INSTALL_IOMMU_PROTOCOL):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Install IOMMU Protocol %r\n", Timestamp, Data1));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_QUEUED_INVALIDATION):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Queued Invalidation", Timestamp));
+ VtdLibDumpQueuedInvaildation (Context, CallbackHandle, &(Event->CommenEvent));
+ //
+ // Fix: the original code was missing this break, so a DXE queued
+ // invalidation event fell through and was also decoded as a root
+ // table event (reading CommenEvent data as a VTD_ROOT_TABLE_INFO).
+ //
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_ROOT_TABLE):
+ VtdLibDumpRootTable (Context, CallbackHandle, &(Event->ContextEvent));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_IOMMU_ALLOC_BUFFER):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: AllocateBuffer 0x%x, Page = 0x%x\n", Timestamp, Data2, Data1));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_IOMMU_FREE_BUFFER):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: FreeBuffer 0x%x, Page = 0x%x\n", Timestamp, Data2, Data1));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_IOMMU_MAP):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Map 0x%x, Operation = 0x%x\n", Timestamp, Data1, Data2));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_IOMMU_UNMAP):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Unmap 0x%x, NumberOfBytes = 0x%x\n", Timestamp, Data2, Data1));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_IOMMU_SET_ATTRIBUTE):
+ VtdLibDumpSetAttribute (Context, CallbackHandle, &(Event->ContextEvent));
+ break;
+ default:
+ VTDLIB_DEBUG ((DEBUG_INFO, "## Unknown VTd Event Type=%d Timestamp=%ld Size=%d\n", Event->EventHeader.LogType, Event->EventHeader.Timestamp, Event->EventHeader.DataSize));
+ Result = FALSE;
+ break;
+ }
+
+ return Result;
+}
+
+/**
+  Flush the write buffer of a VTd engine, if the hardware requires it.
+
+  @param[in] VtdUnitBaseAddress  The base address of the VTd engine.
+**/
+VOID
+VtdLibFlushWriteBuffer (
+  IN UINTN                      VtdUnitBaseAddress
+  )
+{
+  VTD_CAP_REG                   Capability;
+  UINT32                        GlobalStatus;
+
+  //
+  // Only engines reporting RWBF (Required Write-Buffer Flushing) in the
+  // capability register need an explicit flush.
+  //
+  Capability.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_CAP_REG);
+  if (Capability.Bits.RWBF == 0) {
+    return;
+  }
+
+  //
+  // Issue the flush command on top of the current status and wait for the
+  // hardware to clear the WBF status bit.
+  //
+  GlobalStatus = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  MmioWrite32 (VtdUnitBaseAddress + R_GCMD_REG, GlobalStatus | B_GMCD_REG_WBF);
+  while ((MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG) & B_GSTS_REG_WBF) != 0) {
+  }
+}
+
+/**
+  Clear the requested bits in the Global Command Register and wait until the
+  hardware reports them as cleared in the Global Status Register.
+
+  @param[in] VtdUnitBaseAddress  The base address of the VTd engine.
+  @param[in] BitMask             Bit mask of the command bits to clear.
+**/
+VOID
+VtdLibClearGlobalCommandRegisterBits (
+  IN UINTN                      VtdUnitBaseAddress,
+  IN UINT32                     BitMask
+  )
+{
+  UINT32                        GlobalStatus;
+  UINT32                        Command;
+
+  //
+  // GCMD_REG is write-only; the sticky command state is mirrored in
+  // GSTS_REG. Rebuild it without the one-shot bits, then drop BitMask.
+  //
+  GlobalStatus = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  Command      = (GlobalStatus & 0x96FFFFFF) & (~BitMask);
+  MmioWrite32 (VtdUnitBaseAddress + R_GCMD_REG, Command);
+
+  DEBUG ((DEBUG_INFO, "Clear GCMD_REG bits 0x%x.\n", BitMask));
+
+  //
+  // Poll on the status bits of the Global Status Register to become zero.
+  //
+  do {
+    GlobalStatus = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  } while ((GlobalStatus & BitMask) == BitMask);
+  DEBUG ((DEBUG_INFO, "GSTS_REG : 0x%08x \n", GlobalStatus));
+}
+
+/**
+  Set the requested bits in the Global Command Register and wait until the
+  hardware reports them as set in the Global Status Register.
+
+  @param[in] VtdUnitBaseAddress  The base address of the VTd engine.
+  @param[in] BitMask             Bit mask of the command bits to set.
+**/
+VOID
+VtdLibSetGlobalCommandRegisterBits (
+  IN UINTN                      VtdUnitBaseAddress,
+  IN UINT32                     BitMask
+  )
+{
+  UINT32                        GlobalStatus;
+  UINT32                        Command;
+
+  //
+  // GCMD_REG is write-only; the sticky command state is mirrored in
+  // GSTS_REG. Rebuild it without the one-shot bits, then OR in BitMask.
+  //
+  GlobalStatus = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  Command      = (GlobalStatus & 0x96FFFFFF) | BitMask;
+  MmioWrite32 (VtdUnitBaseAddress + R_GCMD_REG, Command);
+
+  DEBUG ((DEBUG_INFO, "Set GCMD_REG bits 0x%x.\n", BitMask));
+
+  //
+  // Poll on the status bits of the Global Status Register to become not zero.
+  //
+  do {
+    GlobalStatus = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  } while ((GlobalStatus & BitMask) == 0);
+  DEBUG ((DEBUG_INFO, "GSTS_REG : 0x%08x \n", GlobalStatus));
+}
+
+/**
+ Disable DMAR translation.
+
+ Flushes the write buffer, clears the Translation Enable bit, then latches a
+ cleared root table pointer so the engine no longer references the old tables.
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+
+ @retval EFI_SUCCESS DMAR translation is disabled.
+**/
+EFI_STATUS
+VtdLibDisableDmar (
+ IN UINTN VtdUnitBaseAddress
+ )
+{
+ UINT32 Reg32;
+
+ DEBUG ((DEBUG_INFO, ">>>>>>DisableDmar() for engine [%x]\n", VtdUnitBaseAddress));
+
+ //
+ // Write Buffer Flush before invalidation
+ //
+ VtdLibFlushWriteBuffer (VtdUnitBaseAddress);
+
+ //
+ // Disable Dmar
+ //
+ //
+ // Set TE (Translation Enable: BIT31) of Global command register to zero
+ //
+ VtdLibClearGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_TE);
+
+ //
+ // Set SRTP (Set Root Table Pointer: BIT30) of Global command register
+ // in order to update the root table pointer.
+ //
+ VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_SRTP);
+
+ Reg32 = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+ DEBUG ((DEBUG_INFO, "DisableDmar: GSTS_REG - 0x%08x\n", Reg32));
+
+ // Clear the root table address register last; SRTP above already latched
+ // the hardware copy.
+ MmioWrite64 (VtdUnitBaseAddress + R_RTADDR_REG, 0);
+
+ DEBUG ((DEBUG_INFO,"VTD () Disabled!<<<<<<\n"));
+
+ return EFI_SUCCESS;
+}
+
+/**
+  Disable the protected memory regions (PMR) of a VTd engine.
+
+  @param[in] VtdUnitBaseAddress  The base address of the VTd engine.
+
+  @retval EFI_SUCCESS      PMR was enabled and is now disabled.
+  @retval EFI_UNSUPPORTED  PMR is not supported by this engine.
+  @retval EFI_NOT_STARTED  PMR was not enabled.
+**/
+EFI_STATUS
+VtdLibDisablePmr (
+  IN UINTN                      VtdUnitBaseAddress
+  )
+{
+  VTD_CAP_REG                   Capability;
+  UINT32                        PmenReg;
+
+  //
+  // Both low-memory (PLMR) and high-memory (PHMR) protected region support
+  // must be present.
+  //
+  Capability.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_CAP_REG);
+  if ((Capability.Bits.PLMR == 0) || (Capability.Bits.PHMR == 0)) {
+    return EFI_UNSUPPORTED;
+  }
+
+  PmenReg = MmioRead32 (VtdUnitBaseAddress + R_PMEN_ENABLE_REG);
+  if ((PmenReg & BIT0) == 0) {
+    DEBUG ((DEBUG_INFO,"Pmr [0x%016lx] not enabled\n", VtdUnitBaseAddress));
+    return EFI_NOT_STARTED;
+  }
+
+  //
+  // Clear the enable bit and wait for the hardware to acknowledge.
+  //
+  MmioWrite32 (VtdUnitBaseAddress + R_PMEN_ENABLE_REG, 0x0);
+  do {
+    PmenReg = MmioRead32 (VtdUnitBaseAddress + R_PMEN_ENABLE_REG);
+  } while ((PmenReg & BIT0) != 0);
+
+  DEBUG ((DEBUG_INFO,"Pmr [0x%016lx] disabled\n", VtdUnitBaseAddress));
+  return EFI_SUCCESS;
+}
+
+/**
+ Disable queued invalidation interface.
+
+ Clears the QIE command bit, waits for the hardware to report the interface
+ as stopped, then clears the invalidation queue address register.
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+**/
+VOID
+VtdLibDisableQueuedInvalidationInterface (
+ IN UINTN VtdUnitBaseAddress
+ )
+{
+ UINT32 Reg32;
+
+ //
+ // GCMD_REG is write-only; the current command state is mirrored in
+ // GSTS_REG, so read it, drop QIE, and write the result back.
+ // NOTE(review): unlike VtdLibClearGlobalCommandRegisterBits, the one-shot
+ // status bits are not masked out here - confirm this is intentional.
+ //
+ Reg32 = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+ Reg32 &= (~B_GMCD_REG_QIE);
+ MmioWrite32 (VtdUnitBaseAddress + R_GCMD_REG, Reg32);
+
+ DEBUG ((DEBUG_INFO, "Disable Queued Invalidation Interface. [%x] GCMD_REG = 0x%x\n", VtdUnitBaseAddress, Reg32));
+ //
+ // Wait until the QIES status bit reads zero.
+ //
+ do {
+ Reg32 = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+ } while ((Reg32 & B_GSTS_REG_QIES) != 0);
+
+ // Tear down the queue address once the interface is stopped.
+ MmioWrite64 (VtdUnitBaseAddress + R_IQA_REG, 0);
+}
+
+/**
+  Flush VTD page table and context table memory.
+
+  This action is to make sure the IOMMU engine can get final data in memory
+  when it does not snoop the processor caches.
+
+  @param[in] VtdUnitBaseAddress  The base address of the VTd engine.
+  @param[in] Base                The base address of memory to be flushed.
+  @param[in] Size                The size of memory in bytes to be flushed.
+**/
+VOID
+VtdLibFlushPageTableMemory (
+  IN UINTN                      VtdUnitBaseAddress,
+  IN UINTN                      Base,
+  IN UINTN                      Size
+  )
+{
+  VTD_ECAP_REG                  ExtCapability;
+
+  //
+  // ECAP.C set means the engine is coherent with the caches and no explicit
+  // write-back is required.
+  //
+  ExtCapability.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_ECAP_REG);
+  if (ExtCapability.Bits.C != 0) {
+    return;
+  }
+
+  WriteBackDataCacheRange ((VOID *) Base, Size);
+}
+
+/**
+ Submit the queued invalidation descriptor to the remapping
+ hardware unit and wait for its completion.
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+ @param[in] Desc The invalidate descriptor. Must point to a QI_DESC or a
+                 QI_256_DESC matching the engine's configured width (IQA.DW).
+ @param[in] ClearFaultBits Clear Error bits
+
+ @retval EFI_SUCCESS The operation was successful.
+ @retval EFI_INVALID_PARAMETER Parameter is invalid.
+ @retval EFI_NOT_READY Queued invalidation is not inited.
+ @retval EFI_DEVICE_ERROR Detect fault, need to clear fault bits if ClearFaultBits is FALSE
+
+**/
+EFI_STATUS
+VtdLibSubmitQueuedInvalidationDescriptor (
+ IN UINTN VtdUnitBaseAddress,
+ IN VOID *Desc,
+ IN BOOLEAN ClearFaultBits
+ )
+{
+ UINTN QueueSize;
+ UINTN QueueTail;
+ UINTN QueueHead;
+ QI_DESC *Qi128Desc;
+ QI_256_DESC *Qi256Desc;
+ VTD_IQA_REG IqaReg;
+ VTD_IQT_REG IqtReg;
+ VTD_IQH_REG IqhReg;
+ UINT32 FaultReg;
+ UINT64 IqercdReg;
+
+ if (Desc == NULL) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ // IQA holds the queue base page; zero means the queue was never set up.
+ IqaReg.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_IQA_REG);
+ if (IqaReg.Bits.IQA == 0) {
+ DEBUG ((DEBUG_ERROR,"Invalidation Queue Buffer not ready [0x%lx]\n", IqaReg.Uint64));
+ return EFI_NOT_READY;
+ }
+ IqtReg.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_IQT_REG);
+
+ // IQA.DW selects the descriptor width the hardware expects.
+ if (IqaReg.Bits.DW == 0) {
+ //
+ // 128-bit descriptor: a 4KB page holds 256 entries, so the entry count
+ // is 2^(QS + 8).
+ //
+ QueueSize = (UINTN) (1 << (IqaReg.Bits.QS + 8));
+ Qi128Desc = (QI_DESC *) (UINTN) (IqaReg.Bits.IQA << VTD_PAGE_SHIFT);
+ QueueTail = (UINTN) IqtReg.Bits128Desc.QT;
+ Qi128Desc += QueueTail;
+ CopyMem (Qi128Desc, Desc, sizeof (QI_DESC));
+ // Make sure the engine sees the descriptor before the tail is bumped.
+ VtdLibFlushPageTableMemory (VtdUnitBaseAddress, (UINTN) Qi128Desc, sizeof(QI_DESC));
+ QueueTail = (QueueTail + 1) % QueueSize;
+
+ DEBUG ((DEBUG_VERBOSE, "[0x%x] Submit QI Descriptor 0x%x [0x%016lx, 0x%016lx]\n",
+ VtdUnitBaseAddress,
+ QueueTail,
+ Qi128Desc->Low,
+ Qi128Desc->High));
+
+ IqtReg.Bits128Desc.QT = QueueTail;
+ } else {
+ //
+ // 256-bit descriptor: a 4KB page holds 128 entries, so the entry count
+ // is 2^(QS + 7).
+ //
+ QueueSize = (UINTN) (1 << (IqaReg.Bits.QS + 7));
+ Qi256Desc = (QI_256_DESC *) (UINTN) (IqaReg.Bits.IQA << VTD_PAGE_SHIFT);
+ QueueTail = (UINTN) IqtReg.Bits256Desc.QT;
+ Qi256Desc += QueueTail;
+ CopyMem (Qi256Desc, Desc, sizeof (QI_256_DESC));
+ // Make sure the engine sees the descriptor before the tail is bumped.
+ VtdLibFlushPageTableMemory (VtdUnitBaseAddress, (UINTN) Qi256Desc, sizeof(QI_256_DESC));
+ QueueTail = (QueueTail + 1) % QueueSize;
+
+ DEBUG ((DEBUG_VERBOSE, "[0x%x] Submit QI Descriptor 0x%x [0x%016lx, 0x%016lx, 0x%016lx, 0x%016lx]\n",
+ VtdUnitBaseAddress,
+ QueueTail,
+ Qi256Desc->Uint64[0],
+ Qi256Desc->Uint64[1],
+ Qi256Desc->Uint64[2],
+ Qi256Desc->Uint64[3]));
+
+ IqtReg.Bits256Desc.QT = QueueTail;
+ }
+
+ //
+ // Update the HW tail register indicating the presence of new descriptors.
+ //
+ MmioWrite64 (VtdUnitBaseAddress + R_IQT_REG, IqtReg.Uint64);
+
+ //
+ // Wait for the head to catch up with the tail, failing out on any queue
+ // invalidation fault (IQE/ITE/ICE).
+ // NOTE(review): this poll has no timeout; a hung engine would spin
+ // forever - confirm that is acceptable for the callers.
+ //
+ do {
+ FaultReg = MmioRead32 (VtdUnitBaseAddress + R_FSTS_REG);
+ if (FaultReg & (B_FSTS_REG_IQE | B_FSTS_REG_ITE | B_FSTS_REG_ICE)) {
+ IqercdReg = MmioRead64 (VtdUnitBaseAddress + R_IQERCD_REG);
+ DEBUG((DEBUG_ERROR, "BAR [0x%016lx] Detect Queue Invalidation Fault [0x%08x] - IQERCD [0x%016lx]\n", VtdUnitBaseAddress, FaultReg, IqercdReg));
+ if (ClearFaultBits) {
+ // Write-one-to-clear only the fault bits we detected.
+ FaultReg &= (B_FSTS_REG_IQE | B_FSTS_REG_ITE | B_FSTS_REG_ICE);
+ MmioWrite32 (VtdUnitBaseAddress + R_FSTS_REG, FaultReg);
+ }
+ return EFI_DEVICE_ERROR;
+ }
+
+ IqhReg.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_IQH_REG);
+ if (IqaReg.Bits.DW == 0) {
+ QueueHead = (UINTN) IqhReg.Bits128Desc.QH;
+ } else {
+ QueueHead = (UINTN) IqhReg.Bits256Desc.QH;
+ }
+ } while (QueueTail != QueueHead);
+
+ return EFI_SUCCESS;
+}
diff --git a/Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLib.inf b/Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLib.inf
new file mode 100644
index 000000000..0d6dff5fa
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLib.inf
@@ -0,0 +1,30 @@
+### @file
+# Component information file for Intel VTd function library.
+#
+# Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+#
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+###
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = IntelVTdPeiDxeLib
+ FILE_GUID = 6cd8b1ea-152d-4cc9-b9b1-f5c692ba63da
+ VERSION_STRING = 1.0
+ MODULE_TYPE = BASE
+ LIBRARY_CLASS = IntelVTdPeiDxeLib
+
+[LibraryClasses]
+ BaseLib
+ PrintLib
+ IoLib
+ CacheMaintenanceLib
+
+[Packages]
+ MdePkg/MdePkg.dec
+ MdeModulePkg/MdeModulePkg.dec
+ IntelSiliconPkg/IntelSiliconPkg.dec
+
+[Sources]
+ IntelVTdPeiDxeLib.c
diff --git a/Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLibExt.inf b/Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLibExt.inf
new file mode 100644
index 000000000..9a2b28e12
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLibExt.inf
@@ -0,0 +1,34 @@
+### @file
+# Component information file for Intel VTd function library.
+#
+# Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+#
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+###
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = IntelVTdPeiDxeLib
+ FILE_GUID = 6fd8b3aa-852d-6ccA-b9b2-f5c692ba63ca
+ VERSION_STRING = 1.0
+ MODULE_TYPE = BASE
+ LIBRARY_CLASS = IntelVTdPeiDxeLib
+
+[LibraryClasses]
+ BaseLib
+ PrintLib
+ IoLib
+ CacheMaintenanceLib
+
+[Packages]
+ MdePkg/MdePkg.dec
+ MdeModulePkg/MdeModulePkg.dec
+ IntelSiliconPkg/IntelSiliconPkg.dec
+
+[Sources]
+ IntelVTdPeiDxeLib.c
+
+[BuildOptions]
+ *_*_X64_CC_FLAGS = -DEXT_CALLBACK
+
--
2.26.2.windows.1
^ permalink raw reply related [flat|nested] 2+ messages in thread
* [PATCH] IntelSiliconPkg/Vtd: Add Vtd core drivers
@ 2023-05-23 8:11 Sheng Wei
0 siblings, 0 replies; 2+ messages in thread
From: Sheng Wei @ 2023-05-23 8:11 UTC (permalink / raw)
To: devel; +Cc: Ray Ni, Rangasai V Chaganty, Jenny Huang, Robert Kowalewski
Add 2 drivers (IntelVTdCorePei, IntelVTdCoreDxe)
for pre-boot DMA protection feature.
Signed-off-by: Sheng Wei <w.sheng@intel.com>
Cc: Ray Ni <ray.ni@intel.com>
Cc: Rangasai V Chaganty <rangasai.v.chaganty@intel.com>
Cc: Jenny Huang <jenny.huang@intel.com>
Cc: Robert Kowalewski <robert.kowalewski@intel.com>
---
.../Feature/VTd/IntelVTdCoreDxe/BmDma.c | 547 +++++
.../VTd/IntelVTdCoreDxe/DmaProtection.c | 705 +++++++
.../VTd/IntelVTdCoreDxe/DmaProtection.h | 668 ++++++
.../VTd/IntelVTdCoreDxe/DmarAcpiTable.c | 398 ++++
.../VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.c | 412 ++++
.../VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.inf | 93 +
.../VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.uni | 14 +
.../VTd/IntelVTdCoreDxe/IntelVTdCoreDxeExtra.uni | 14 +
.../Feature/VTd/IntelVTdCoreDxe/PciInfo.c | 418 ++++
.../VTd/IntelVTdCoreDxe/TranslationTable.c | 1112 ++++++++++
.../VTd/IntelVTdCoreDxe/TranslationTableEx.c | 108 +
.../Feature/VTd/IntelVTdCoreDxe/VtdLog.c | 383 ++++
.../Feature/VTd/IntelVTdCoreDxe/VtdReg.c | 757 +++++++
.../Feature/VTd/IntelVTdCorePei/DmarTable.c | 63 +
.../VTd/IntelVTdCorePei/IntelVTdCorePei.c | 1099 ++++++++++
.../VTd/IntelVTdCorePei/IntelVTdCorePei.h | 262 +++
.../VTd/IntelVTdCorePei/IntelVTdCorePei.inf | 70 +
.../VTd/IntelVTdCorePei/IntelVTdCorePei.uni | 14 +
.../VTd/IntelVTdCorePei/IntelVTdCorePeiExtra.uni | 14 +
.../VTd/IntelVTdCorePei/IntelVTdDmar.c | 727 +++++++
.../VTd/IntelVTdCorePei/TranslationTable.c | 926 +++++++++
.../Include/Guid/VtdLogDataHob.h | 151 ++
.../Include/Library/IntelVTdPeiDxeLib.h | 423 ++++
.../IntelSiliconPkg/Include/Protocol/VtdLog.h | 59 +
.../Intel/IntelSiliconPkg/IntelSiliconPkg.dec | 21 +
.../Intel/IntelSiliconPkg/IntelSiliconPkg.dsc | 1 +
.../IntelVTdPeiDxeLib/IntelVTdPeiDxeLib.c | 1812 +++++++++++++++++
.../IntelVTdPeiDxeLib/IntelVTdPeiDxeLib.inf | 30 +
.../IntelVTdPeiDxeLibExt.inf | 34 +
29 files changed, 11335 insertions(+)
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/BmDma.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmaProtection.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmaProtection.h
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmarAcpiTable.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.inf
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.uni
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxeExtra.uni
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/PciInfo.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/TranslationTable.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/TranslationTableEx.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/VtdLog.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/VtdReg.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/DmarTable.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.h
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.inf
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.uni
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePeiExtra.uni
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdDmar.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/TranslationTable.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Include/Guid/VtdLogDataHob.h
create mode 100644 Silicon/Intel/IntelSiliconPkg/Include/Library/IntelVTdPeiDxeLib.h
create mode 100644 Silicon/Intel/IntelSiliconPkg/Include/Protocol/VtdLog.h
create mode 100644 Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLib.c
create mode 100644 Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLib.inf
create mode 100644 Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLibExt.inf
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/BmDma.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/BmDma.c
new file mode 100644
index 000000000..41917a004
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/BmDma.c
@@ -0,0 +1,547 @@
+/** @file
+ BmDma related function
+
+ Copyright (c) 2017 - 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "DmaProtection.h"
+
+// Highest address a DMA buffer may occupy; remaps above this are bounced.
+// TBD: May make it a policy
+#define DMA_MEMORY_TOP MAX_UINTN
+//#define DMA_MEMORY_TOP 0x0000000001FFFFFFULL
+
+#define MAP_HANDLE_INFO_SIGNATURE SIGNATURE_32 ('H', 'M', 'A', 'P')
+// Per-device access record attached to a mapping; tracks which device
+// handle was granted which IOMMU access rights for that mapping.
+typedef struct {
+ UINT32 Signature;
+ LIST_ENTRY Link;
+ EFI_HANDLE DeviceHandle;
+ UINT64 IoMmuAccess;
+} MAP_HANDLE_INFO;
+#define MAP_HANDLE_INFO_FROM_LINK(a) CR (a, MAP_HANDLE_INFO, Link, MAP_HANDLE_INFO_SIGNATURE)
+
+#define MAP_INFO_SIGNATURE SIGNATURE_32 ('D', 'M', 'A', 'P')
+// One live Map() operation: the host buffer, the (possibly bounced) device
+// address, and the list of MAP_HANDLE_INFO records for it.
+typedef struct {
+ UINT32 Signature;
+ LIST_ENTRY Link;
+ EDKII_IOMMU_OPERATION Operation;
+ UINTN NumberOfBytes;
+ UINTN NumberOfPages;
+ EFI_PHYSICAL_ADDRESS HostAddress;
+ EFI_PHYSICAL_ADDRESS DeviceAddress;
+ LIST_ENTRY HandleList;
+} MAP_INFO;
+#define MAP_INFO_FROM_LINK(a) CR (a, MAP_INFO, Link, MAP_INFO_SIGNATURE)
+
+// Global list of outstanding mappings; guarded by raising to VTD_TPL_LEVEL.
+LIST_ENTRY gMaps = INITIALIZE_LIST_HEAD_VARIABLE(gMaps);
+
+/**
+ This function fills DeviceHandle/IoMmuAccess to the MAP_HANDLE_INFO,
+ based upon the DeviceAddress.
+
+ Looks up the MAP_INFO whose DeviceAddress matches, then updates the
+ existing MAP_HANDLE_INFO for DeviceHandle or appends a new one.
+
+ @param[in] DeviceHandle The device who initiates the DMA access request.
+ @param[in] DeviceAddress The base of device memory address to be used as the DMA memory.
+ @param[in] Length The length of device memory address to be used as the DMA memory.
+ @param[in] IoMmuAccess The IOMMU access.
+
+**/
+VOID
+SyncDeviceHandleToMapInfo (
+ IN EFI_HANDLE DeviceHandle,
+ IN EFI_PHYSICAL_ADDRESS DeviceAddress,
+ IN UINT64 Length,
+ IN UINT64 IoMmuAccess
+ )
+{
+ MAP_INFO *MapInfo;
+ MAP_HANDLE_INFO *MapHandleInfo;
+ LIST_ENTRY *Link;
+ EFI_TPL OriginalTpl;
+
+ //
+ // Find MapInfo according to DeviceAddress
+ // (TPL is raised for the whole routine to keep gMaps consistent).
+ //
+ OriginalTpl = gBS->RaiseTPL (VTD_TPL_LEVEL);
+ MapInfo = NULL;
+ for (Link = GetFirstNode (&gMaps)
+ ; !IsNull (&gMaps, Link)
+ ; Link = GetNextNode (&gMaps, Link)
+ ) {
+ MapInfo = MAP_INFO_FROM_LINK (Link);
+ if (MapInfo->DeviceAddress == DeviceAddress) {
+ break;
+ }
+ }
+ // MapInfo may be NULL (empty list) or the last node (no match); the
+ // second clause re-checks the address to cover the no-match case.
+ if ((MapInfo == NULL) || (MapInfo->DeviceAddress != DeviceAddress)) {
+ DEBUG ((DEBUG_ERROR, "SyncDeviceHandleToMapInfo: DeviceAddress(0x%lx) - not found\n", DeviceAddress));
+ gBS->RestoreTPL (OriginalTpl);
+ return ;
+ }
+
+ //
+ // Find MapHandleInfo according to DeviceHandle
+ //
+ MapHandleInfo = NULL;
+ for (Link = GetFirstNode (&MapInfo->HandleList)
+ ; !IsNull (&MapInfo->HandleList, Link)
+ ; Link = GetNextNode (&MapInfo->HandleList, Link)
+ ) {
+ MapHandleInfo = MAP_HANDLE_INFO_FROM_LINK (Link);
+ if (MapHandleInfo->DeviceHandle == DeviceHandle) {
+ break;
+ }
+ }
+ // Existing record: just refresh the access rights.
+ if ((MapHandleInfo != NULL) && (MapHandleInfo->DeviceHandle == DeviceHandle)) {
+ MapHandleInfo->IoMmuAccess = IoMmuAccess;
+ gBS->RestoreTPL (OriginalTpl);
+ return ;
+ }
+
+ //
+ // No DeviceHandle
+ // Initialize and insert the MAP_HANDLE_INFO structure
+ //
+ MapHandleInfo = AllocatePool (sizeof (MAP_HANDLE_INFO));
+ if (MapHandleInfo == NULL) {
+ // Best effort: the mapping stays usable, only the per-handle record is lost.
+ DEBUG ((DEBUG_ERROR, "SyncDeviceHandleToMapInfo: %r\n", EFI_OUT_OF_RESOURCES));
+ gBS->RestoreTPL (OriginalTpl);
+ return ;
+ }
+
+ MapHandleInfo->Signature = MAP_HANDLE_INFO_SIGNATURE;
+ MapHandleInfo->DeviceHandle = DeviceHandle;
+ MapHandleInfo->IoMmuAccess = IoMmuAccess;
+
+ InsertTailList (&MapInfo->HandleList, &MapHandleInfo->Link);
+ gBS->RestoreTPL (OriginalTpl);
+
+ return ;
+}
+
+/**
+ Provides the controller-specific addresses required to access system memory from a
+ DMA bus master.
+
+ If the buffer is misaligned, crosses DMA_MEMORY_TOP, or lies above 4GB for
+ a 32-bit operation, a bounce buffer is allocated and (for read operations)
+ pre-filled from the host buffer.
+
+ @param This The protocol instance pointer.
+ @param Operation Indicates if the bus master is going to read or write to system memory.
+ @param HostAddress The system memory address to map to the PCI controller.
+ @param NumberOfBytes On input the number of bytes to map. On output the number of bytes
+ that were mapped.
+ @param DeviceAddress The resulting map address for the bus master PCI controller to use to
+ access the hosts HostAddress.
+ @param Mapping A resulting value to pass to Unmap().
+
+ @retval EFI_SUCCESS The range was mapped for the returned NumberOfBytes.
+ @retval EFI_UNSUPPORTED The HostAddress cannot be mapped as a common buffer.
+ @retval EFI_INVALID_PARAMETER One or more parameters are invalid.
+ @retval EFI_OUT_OF_RESOURCES The request could not be completed due to a lack of resources.
+ @retval EFI_DEVICE_ERROR The system hardware could not map the requested address.
+
+**/
+EFI_STATUS
+EFIAPI
+IoMmuMap (
+ IN EDKII_IOMMU_PROTOCOL *This,
+ IN EDKII_IOMMU_OPERATION Operation,
+ IN VOID *HostAddress,
+ IN OUT UINTN *NumberOfBytes,
+ OUT EFI_PHYSICAL_ADDRESS *DeviceAddress,
+ OUT VOID **Mapping
+ )
+{
+ EFI_STATUS Status;
+ EFI_PHYSICAL_ADDRESS PhysicalAddress;
+ MAP_INFO *MapInfo;
+ EFI_PHYSICAL_ADDRESS DmaMemoryTop;
+ BOOLEAN NeedRemap;
+ EFI_TPL OriginalTpl;
+
+ // NOTE(review): HostAddress is not NULL-checked here, only the other
+ // pointer parameters - confirm callers guarantee it.
+ if (NumberOfBytes == NULL || DeviceAddress == NULL ||
+ Mapping == NULL) {
+ DEBUG ((DEBUG_ERROR, "IoMmuMap: %r\n", EFI_INVALID_PARAMETER));
+ return EFI_INVALID_PARAMETER;
+ }
+
+ DEBUG ((DEBUG_VERBOSE, "IoMmuMap: ==> 0x%08x - 0x%08x (%x)\n", HostAddress, *NumberOfBytes, Operation));
+
+ //
+ // Make sure that Operation is valid
+ //
+ if ((UINT32) Operation >= EdkiiIoMmuOperationMaximum) {
+ DEBUG ((DEBUG_ERROR, "IoMmuMap: %r\n", EFI_INVALID_PARAMETER));
+ return EFI_INVALID_PARAMETER;
+ }
+ NeedRemap = FALSE;
+ PhysicalAddress = (EFI_PHYSICAL_ADDRESS) (UINTN) HostAddress;
+
+ DmaMemoryTop = DMA_MEMORY_TOP;
+
+ //
+ // Alignment check: unaligned buffers are bounced, except for common
+ // buffers, which may be an aligned sub-range from IoMmuAllocateBuffer.
+ //
+ if ((*NumberOfBytes != ALIGN_VALUE(*NumberOfBytes, SIZE_4KB)) ||
+ (PhysicalAddress != ALIGN_VALUE(PhysicalAddress, SIZE_4KB))) {
+ if ((Operation == EdkiiIoMmuOperationBusMasterCommonBuffer) ||
+ (Operation == EdkiiIoMmuOperationBusMasterCommonBuffer64)) {
+ //
+ // The input buffer might be a subset from IoMmuAllocateBuffer.
+ // Skip the check.
+ //
+ } else {
+ NeedRemap = TRUE;
+ }
+ }
+
+ // Buffers reaching DMA_MEMORY_TOP must also be bounced.
+ if ((PhysicalAddress + *NumberOfBytes) >= DMA_MEMORY_TOP) {
+ NeedRemap = TRUE;
+ }
+
+ if (((Operation != EdkiiIoMmuOperationBusMasterRead64 &&
+ Operation != EdkiiIoMmuOperationBusMasterWrite64 &&
+ Operation != EdkiiIoMmuOperationBusMasterCommonBuffer64)) &&
+ ((PhysicalAddress + *NumberOfBytes) > SIZE_4GB)) {
+ //
+ // If the root bridge or the device cannot handle performing DMA above
+ // 4GB but any part of the DMA transfer being mapped is above 4GB, then
+ // map the DMA transfer to a buffer below 4GB.
+ //
+ NeedRemap = TRUE;
+ DmaMemoryTop = MIN (DmaMemoryTop, SIZE_4GB - 1);
+ }
+
+ if (Operation == EdkiiIoMmuOperationBusMasterCommonBuffer ||
+ Operation == EdkiiIoMmuOperationBusMasterCommonBuffer64) {
+ if (NeedRemap) {
+ //
+ // Common Buffer operations can not be remapped. If the common buffer
+ // is above 4GB, then it is not possible to generate a mapping, so return
+ // an error.
+ //
+ DEBUG ((DEBUG_ERROR, "IoMmuMap: %r\n", EFI_UNSUPPORTED));
+ return EFI_UNSUPPORTED;
+ }
+ }
+
+ //
+ // Allocate a MAP_INFO structure to remember the mapping when Unmap() is
+ // called later.
+ //
+ MapInfo = AllocatePool (sizeof (MAP_INFO));
+ if (MapInfo == NULL) {
+ *NumberOfBytes = 0;
+ DEBUG ((DEBUG_ERROR, "IoMmuMap: %r\n", EFI_OUT_OF_RESOURCES));
+ return EFI_OUT_OF_RESOURCES;
+ }
+
+ //
+ // Initialize the MAP_INFO structure.
+ // DeviceAddress temporarily holds DmaMemoryTop: it is the max-address
+ // input to AllocatePages below, and is overwritten on both paths.
+ //
+ MapInfo->Signature = MAP_INFO_SIGNATURE;
+ MapInfo->Operation = Operation;
+ MapInfo->NumberOfBytes = *NumberOfBytes;
+ MapInfo->NumberOfPages = EFI_SIZE_TO_PAGES (MapInfo->NumberOfBytes);
+ MapInfo->HostAddress = PhysicalAddress;
+ MapInfo->DeviceAddress = DmaMemoryTop;
+ InitializeListHead(&MapInfo->HandleList);
+
+ //
+ // Allocate a buffer below 4GB to map the transfer to.
+ //
+ if (NeedRemap) {
+ Status = gBS->AllocatePages (
+ AllocateMaxAddress,
+ EfiBootServicesData,
+ MapInfo->NumberOfPages,
+ &MapInfo->DeviceAddress
+ );
+ if (EFI_ERROR (Status)) {
+ FreePool (MapInfo);
+ *NumberOfBytes = 0;
+ DEBUG ((DEBUG_ERROR, "IoMmuMap: %r\n", Status));
+ return Status;
+ }
+
+ //
+ // If this is a read operation from the Bus Master's point of view,
+ // then copy the contents of the real buffer into the mapped buffer
+ // so the Bus Master can read the contents of the real buffer.
+ //
+ if (Operation == EdkiiIoMmuOperationBusMasterRead ||
+ Operation == EdkiiIoMmuOperationBusMasterRead64) {
+ CopyMem (
+ (VOID *) (UINTN) MapInfo->DeviceAddress,
+ (VOID *) (UINTN) MapInfo->HostAddress,
+ MapInfo->NumberOfBytes
+ );
+ }
+ } else {
+ MapInfo->DeviceAddress = MapInfo->HostAddress;
+ }
+
+ // Publish the mapping under raised TPL so Unmap/Sync see a consistent list.
+ OriginalTpl = gBS->RaiseTPL (VTD_TPL_LEVEL);
+ InsertTailList (&gMaps, &MapInfo->Link);
+ gBS->RestoreTPL (OriginalTpl);
+
+ //
+ // The DeviceAddress is the address of the maped buffer below 4GB
+ //
+ *DeviceAddress = MapInfo->DeviceAddress;
+ //
+ // Return a pointer to the MAP_INFO structure in Mapping
+ //
+ *Mapping = MapInfo;
+
+ DEBUG ((DEBUG_VERBOSE, "IoMmuMap: 0x%08x - 0x%08x <==\n", *DeviceAddress, *Mapping));
+
+ VTdLogAddEvent (VTDLOG_DXE_IOMMU_MAP, (UINT64) (*DeviceAddress), (UINT64) Operation);
+
+ return EFI_SUCCESS;
+}
+
+/**
+ Completes the Map() operation and releases any corresponding resources.
+
+ Removes the MAP_INFO from gMaps, frees its per-handle records, copies a
+ bounce buffer back to the host buffer for write operations, and frees the
+ bounce buffer if one was used.
+
+ @param This The protocol instance pointer.
+ @param Mapping The mapping value returned from Map().
+
+ @retval EFI_SUCCESS The range was unmapped.
+ @retval EFI_INVALID_PARAMETER Mapping is not a value that was returned by Map().
+ @retval EFI_DEVICE_ERROR The data was not committed to the target system memory.
+**/
+EFI_STATUS
+EFIAPI
+IoMmuUnmap (
+ IN EDKII_IOMMU_PROTOCOL *This,
+ IN VOID *Mapping
+ )
+{
+ MAP_INFO *MapInfo;
+ MAP_HANDLE_INFO *MapHandleInfo;
+ LIST_ENTRY *Link;
+ EFI_TPL OriginalTpl;
+
+ DEBUG ((DEBUG_VERBOSE, "IoMmuUnmap: 0x%08x\n", Mapping));
+
+ if (Mapping == NULL) {
+ DEBUG ((DEBUG_ERROR, "IoMmuUnmap: %r\n", EFI_INVALID_PARAMETER));
+ return EFI_INVALID_PARAMETER;
+ }
+
+ // Validate Mapping by locating it in gMaps under raised TPL.
+ OriginalTpl = gBS->RaiseTPL (VTD_TPL_LEVEL);
+ MapInfo = NULL;
+ for (Link = GetFirstNode (&gMaps)
+ ; !IsNull (&gMaps, Link)
+ ; Link = GetNextNode (&gMaps, Link)
+ ) {
+ MapInfo = MAP_INFO_FROM_LINK (Link);
+ if (MapInfo == Mapping) {
+ break;
+ }
+ }
+ //
+ // Mapping is not a valid value returned by Map()
+ //
+ if (MapInfo != Mapping) {
+ gBS->RestoreTPL (OriginalTpl);
+ DEBUG ((DEBUG_ERROR, "IoMmuUnmap: %r\n", EFI_INVALID_PARAMETER));
+ return EFI_INVALID_PARAMETER;
+ }
+ RemoveEntryList (&MapInfo->Link);
+ gBS->RestoreTPL (OriginalTpl);
+
+ //
+ // remove all nodes in MapInfo->HandleList
+ //
+ while (!IsListEmpty (&MapInfo->HandleList)) {
+ MapHandleInfo = MAP_HANDLE_INFO_FROM_LINK (MapInfo->HandleList.ForwardLink);
+ RemoveEntryList (&MapHandleInfo->Link);
+ FreePool (MapHandleInfo);
+ }
+
+ // DeviceAddress != HostAddress means a bounce buffer was allocated in Map().
+ if (MapInfo->DeviceAddress != MapInfo->HostAddress) {
+ //
+ // If this is a write operation from the Bus Master's point of view,
+ // then copy the contents of the mapped buffer into the real buffer
+ // so the processor can read the contents of the real buffer.
+ //
+ if (MapInfo->Operation == EdkiiIoMmuOperationBusMasterWrite ||
+ MapInfo->Operation == EdkiiIoMmuOperationBusMasterWrite64) {
+ CopyMem (
+ (VOID *) (UINTN) MapInfo->HostAddress,
+ (VOID *) (UINTN) MapInfo->DeviceAddress,
+ MapInfo->NumberOfBytes
+ );
+ }
+
+ //
+ // Free the mapped buffer and the MAP_INFO structure.
+ //
+ gBS->FreePages (MapInfo->DeviceAddress, MapInfo->NumberOfPages);
+ }
+
+ // Log before freeing: MapInfo fields are still valid here.
+ VTdLogAddEvent (VTDLOG_DXE_IOMMU_UNMAP, MapInfo->NumberOfBytes, MapInfo->DeviceAddress);
+
+ FreePool (Mapping);
+ return EFI_SUCCESS;
+}
+
+/**
+  Allocates pages that are suitable for an OperationBusMasterCommonBuffer or
+  OperationBusMasterCommonBuffer64 mapping.
+
+  @param This        The protocol instance pointer.
+  @param Type        This parameter is not used and must be ignored.
+  @param MemoryType  The type of memory to allocate, EfiBootServicesData or
+                     EfiRuntimeServicesData.
+  @param Pages       The number of pages to allocate.
+  @param HostAddress A pointer to store the base system memory address of the
+                     allocated range. Only written on success.
+  @param Attributes  The requested bit mask of attributes for the allocated range.
+
+  @retval EFI_SUCCESS           The requested memory pages were allocated.
+  @retval EFI_UNSUPPORTED       Attributes is unsupported. The only legal attribute bits are
+                                MEMORY_WRITE_COMBINE, MEMORY_CACHED and DUAL_ADDRESS_CYCLE.
+  @retval EFI_INVALID_PARAMETER One or more parameters are invalid.
+  @retval EFI_OUT_OF_RESOURCES  The memory pages could not be allocated.
+
+**/
+EFI_STATUS
+EFIAPI
+IoMmuAllocateBuffer (
+  IN     EDKII_IOMMU_PROTOCOL   *This,
+  IN     EFI_ALLOCATE_TYPE      Type,
+  IN     EFI_MEMORY_TYPE        MemoryType,
+  IN     UINTN                  Pages,
+  IN OUT VOID                   **HostAddress,
+  IN     UINT64                 Attributes
+  )
+{
+  EFI_STATUS                    Status;
+  EFI_PHYSICAL_ADDRESS          PhysicalAddress;
+
+  DEBUG ((DEBUG_VERBOSE, "IoMmuAllocateBuffer: ==> 0x%08x\n", Pages));
+
+  //
+  // Validate Attributes
+  //
+  if ((Attributes & EDKII_IOMMU_ATTRIBUTE_INVALID_FOR_ALLOCATE_BUFFER) != 0) {
+    DEBUG ((DEBUG_ERROR, "IoMmuAllocateBuffer: %r\n", EFI_UNSUPPORTED));
+    return EFI_UNSUPPORTED;
+  }
+
+  //
+  // Check for invalid inputs
+  //
+  if (HostAddress == NULL) {
+    DEBUG ((DEBUG_ERROR, "IoMmuAllocateBuffer: %r\n", EFI_INVALID_PARAMETER));
+    return EFI_INVALID_PARAMETER;
+  }
+
+  //
+  // The only valid memory types are EfiBootServicesData and
+  // EfiRuntimeServicesData
+  //
+  if (MemoryType != EfiBootServicesData &&
+      MemoryType != EfiRuntimeServicesData) {
+    DEBUG ((DEBUG_ERROR, "IoMmuAllocateBuffer: %r\n", EFI_INVALID_PARAMETER));
+    return EFI_INVALID_PARAMETER;
+  }
+
+  PhysicalAddress = DMA_MEMORY_TOP;
+  if ((Attributes & EDKII_IOMMU_ATTRIBUTE_DUAL_ADDRESS_CYCLE) == 0) {
+    //
+    // Limit allocations to memory below 4GB
+    //
+    PhysicalAddress = MIN (PhysicalAddress, SIZE_4GB - 1);
+  }
+  Status = gBS->AllocatePages (
+                  AllocateMaxAddress,
+                  MemoryType,
+                  Pages,
+                  &PhysicalAddress
+                  );
+  if (!EFI_ERROR (Status)) {
+    *HostAddress = (VOID *) (UINTN) PhysicalAddress;
+
+    VTdLogAddEvent (VTDLOG_DXE_IOMMU_ALLOC_BUFFER, (UINT64) Pages, (UINT64) (*HostAddress));
+
+    //
+    // Fix: only dereference *HostAddress on success. The original code
+    // printed *HostAddress unconditionally, reading an indeterminate
+    // caller-side value when AllocatePages failed.
+    //
+    DEBUG ((DEBUG_VERBOSE, "IoMmuAllocateBuffer: 0x%08x <==\n", *HostAddress));
+  }
+
+  return Status;
+}
+
+/**
+  Frees memory that was allocated with AllocateBuffer().
+
+  @param This        The protocol instance pointer.
+  @param Pages       The number of pages to free.
+  @param HostAddress The base system memory address of the allocated range.
+
+  @retval EFI_SUCCESS            The requested memory pages were freed.
+  @retval EFI_INVALID_PARAMETER  The memory range specified by HostAddress and Pages
+                                 was not allocated with AllocateBuffer().
+
+**/
+EFI_STATUS
+EFIAPI
+IoMmuFreeBuffer (
+  IN EDKII_IOMMU_PROTOCOL       *This,
+  IN UINTN                      Pages,
+  IN VOID                       *HostAddress
+  )
+{
+  //
+  // Fix: the original format string "IoMmuFreeBuffer: 0x%\n" was missing
+  // its conversion specifier, so the page count was never printed.
+  //
+  DEBUG ((DEBUG_VERBOSE, "IoMmuFreeBuffer: 0x%x\n", Pages));
+
+  VTdLogAddEvent (VTDLOG_DXE_IOMMU_FREE_BUFFER, Pages, (UINT64) HostAddress);
+
+  return gBS->FreePages ((EFI_PHYSICAL_ADDRESS) (UINTN) HostAddress, Pages);
+}
+
+/**
+ Get device information from mapping.
+
+ Validates Mapping by searching gMaps, then returns the mapping's device
+ address and page count.
+
+ @param[in] Mapping The mapping.
+ @param[out] DeviceAddress The device address of the mapping.
+ @param[out] NumberOfPages The number of pages of the mapping.
+
+ @retval EFI_SUCCESS The device information is returned.
+ @retval EFI_INVALID_PARAMETER The mapping is invalid.
+**/
+EFI_STATUS
+GetDeviceInfoFromMapping (
+ IN VOID *Mapping,
+ OUT EFI_PHYSICAL_ADDRESS *DeviceAddress,
+ OUT UINTN *NumberOfPages
+ )
+{
+ MAP_INFO *MapInfo;
+ LIST_ENTRY *Link;
+
+ if (Mapping == NULL) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ //
+ // NOTE(review): gMaps is walked here without raising to VTD_TPL_LEVEL,
+ // unlike IoMmuMap/IoMmuUnmap - confirm all callers already run at a TPL
+ // that prevents concurrent list mutation.
+ //
+ MapInfo = NULL;
+ for (Link = GetFirstNode (&gMaps)
+ ; !IsNull (&gMaps, Link)
+ ; Link = GetNextNode (&gMaps, Link)
+ ) {
+ MapInfo = MAP_INFO_FROM_LINK (Link);
+ if (MapInfo == Mapping) {
+ break;
+ }
+ }
+ //
+ // Mapping is not a valid value returned by Map()
+ //
+ if (MapInfo != Mapping) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ *DeviceAddress = MapInfo->DeviceAddress;
+ *NumberOfPages = MapInfo->NumberOfPages;
+ return EFI_SUCCESS;
+}
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmaProtection.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmaProtection.c
new file mode 100644
index 000000000..9fd2b4a44
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmaProtection.c
@@ -0,0 +1,705 @@
+/** @file
+
+ Copyright (c) 2017 - 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "DmaProtection.h"
+
+UINT64 mBelow4GMemoryLimit;
+UINT64 mAbove4GMemoryLimit;
+
+EDKII_PLATFORM_VTD_POLICY_PROTOCOL *mPlatformVTdPolicy;
+
+VTD_ACCESS_REQUEST *mAccessRequest = NULL;
+UINTN mAccessRequestCount = 0;
+UINTN mAccessRequestMaxCount = 0;
+
+/**
+ Append VTd Access Request to global.
+
+ @param[in] Segment The Segment used to identify a VTd engine.
+ @param[in] SourceId The SourceId used to identify a VTd engine and table entry.
+ @param[in] BaseAddress The base of device memory address to be used as the DMA memory.
+ @param[in] Length The length of device memory address to be used as the DMA memory.
+ @param[in] IoMmuAccess The IOMMU access.
+
+ @retval EFI_SUCCESS The IoMmuAccess is set for the memory range specified by BaseAddress and Length.
+ @retval EFI_INVALID_PARAMETER BaseAddress is not IoMmu Page size aligned.
+ @retval EFI_INVALID_PARAMETER Length is not IoMmu Page size aligned.
+ @retval EFI_INVALID_PARAMETER Length is 0.
+ @retval EFI_INVALID_PARAMETER IoMmuAccess specified an illegal combination of access.
+ @retval EFI_UNSUPPORTED The bit mask of IoMmuAccess is not supported by the IOMMU.
+ @retval EFI_UNSUPPORTED The IOMMU does not support the memory range specified by BaseAddress and Length.
+ @retval EFI_OUT_OF_RESOURCES There are not enough resources available to modify the IOMMU access.
+ @retval EFI_DEVICE_ERROR The IOMMU device reported an error while attempting the operation.
+
+**/
+EFI_STATUS
+RequestAccessAttribute (
+  IN UINT16 Segment,
+  IN VTD_SOURCE_ID SourceId,
+  IN UINT64 BaseAddress,
+  IN UINT64 Length,
+  IN UINT64 IoMmuAccess
+  )
+{
+  VTD_ACCESS_REQUEST *NewAccessRequest;
+  UINTN Index;
+
+  //
+  // Optimization for memory.
+  //
+  // A request with IoMmuAccess == 0 revokes an earlier grant. Instead of
+  // appending a new record, find the matching non-zero record and delete
+  // it, so grant/revoke pairs cancel out and the array stays small.
+  //
+  if (IoMmuAccess == 0) {
+    for (Index = 0; Index < mAccessRequestCount; Index++) {
+      if ((mAccessRequest[Index].Segment == Segment) &&
+          (mAccessRequest[Index].SourceId.Uint16 == SourceId.Uint16) &&
+          (mAccessRequest[Index].BaseAddress == BaseAddress) &&
+          (mAccessRequest[Index].Length == Length) &&
+          (mAccessRequest[Index].IoMmuAccess != 0)) {
+        //
+        // Remove this record [Index] by shifting the tail down one slot.
+        // No need to add the new record.
+        //
+        if (Index != mAccessRequestCount - 1) {
+          CopyMem (
+            &mAccessRequest[Index],
+            &mAccessRequest[Index + 1],
+            sizeof (VTD_ACCESS_REQUEST) * (mAccessRequestCount - 1 - Index)
+            );
+        }
+        //
+        // Scrub the now-unused last slot so stale data is never re-read.
+        //
+        ZeroMem (&mAccessRequest[mAccessRequestCount - 1], sizeof(VTD_ACCESS_REQUEST));
+        mAccessRequestCount--;
+        return EFI_SUCCESS;
+      }
+    }
+  }
+
+  //
+  // Grow the request array in MAX_VTD_ACCESS_REQUEST increments when full.
+  // Existing records (if any) are migrated and the old buffer freed.
+  //
+  if (mAccessRequestCount >= mAccessRequestMaxCount) {
+    NewAccessRequest = AllocateZeroPool (sizeof(*NewAccessRequest) * (mAccessRequestMaxCount + MAX_VTD_ACCESS_REQUEST));
+    if (NewAccessRequest == NULL) {
+      return EFI_OUT_OF_RESOURCES;
+    }
+    mAccessRequestMaxCount += MAX_VTD_ACCESS_REQUEST;
+    if (mAccessRequest != NULL) {
+      CopyMem (NewAccessRequest, mAccessRequest, sizeof(*NewAccessRequest) * mAccessRequestCount);
+      FreePool (mAccessRequest);
+    }
+    mAccessRequest = NewAccessRequest;
+  }
+
+  ASSERT (mAccessRequestCount < mAccessRequestMaxCount);
+
+  //
+  // Append the new request record.
+  //
+  mAccessRequest[mAccessRequestCount].Segment = Segment;
+  mAccessRequest[mAccessRequestCount].SourceId = SourceId;
+  mAccessRequest[mAccessRequestCount].BaseAddress = BaseAddress;
+  mAccessRequest[mAccessRequestCount].Length = Length;
+  mAccessRequest[mAccessRequestCount].IoMmuAccess = IoMmuAccess;
+
+  mAccessRequestCount++;
+
+  return EFI_SUCCESS;
+}
+
+/**
+ Process Access Requests from before DMAR table is installed.
+
+**/
+VOID
+ProcessRequestedAccessAttribute (
+  VOID
+  )
+{
+  UINTN Index;
+  EFI_STATUS Status;
+
+  DEBUG ((DEBUG_INFO, "ProcessRequestedAccessAttribute ...\n"));
+
+  //
+  // Replay every access request that was recorded before the DMAR table
+  // was installed, now that the VTd translation tables exist.
+  //
+  for (Index = 0; Index < mAccessRequestCount; Index++) {
+    DEBUG ((
+      DEBUG_INFO,
+      "PCI(S%x.B%x.D%x.F%x) ",
+      mAccessRequest[Index].Segment,
+      mAccessRequest[Index].SourceId.Bits.Bus,
+      mAccessRequest[Index].SourceId.Bits.Device,
+      mAccessRequest[Index].SourceId.Bits.Function
+      ));
+    DEBUG ((
+      DEBUG_INFO,
+      "(0x%lx~0x%lx) - %lx\n",
+      mAccessRequest[Index].BaseAddress,
+      mAccessRequest[Index].Length,
+      mAccessRequest[Index].IoMmuAccess
+      ));
+    Status = SetAccessAttribute (
+               mAccessRequest[Index].Segment,
+               mAccessRequest[Index].SourceId,
+               mAccessRequest[Index].BaseAddress,
+               mAccessRequest[Index].Length,
+               mAccessRequest[Index].IoMmuAccess
+               );
+    if (EFI_ERROR (Status)) {
+      //
+      // Fix: the message previously ended with ": " and no newline,
+      // which garbled the debug log. Best-effort: log and continue with
+      // the remaining requests.
+      //
+      DEBUG ((DEBUG_ERROR, "SetAccessAttribute - %r\n", Status));
+    }
+  }
+
+  //
+  // The recorded requests are one-shot: release the buffer and reset the
+  // bookkeeping so later RequestAccessAttribute() calls start fresh.
+  //
+  if (mAccessRequest != NULL) {
+    FreePool (mAccessRequest);
+  }
+  mAccessRequest = NULL;
+  mAccessRequestCount = 0;
+  mAccessRequestMaxCount = 0;
+
+  DEBUG ((DEBUG_INFO, "ProcessRequestedAccessAttribute Done\n"));
+}
+
+/**
+ Return UEFI memory map information.
+
+ @param[out] Below4GMemoryLimit The below 4GiB memory limit address or 0 if insufficient resources exist to
+ determine the address.
+ @param[out] Above4GMemoryLimit The above 4GiB memory limit address or 0 if insufficient resources exist to
+ determine the address.
+
+**/
+VOID
+ReturnUefiMemoryMap (
+  OUT UINT64 *Below4GMemoryLimit,
+  OUT UINT64 *Above4GMemoryLimit
+  )
+{
+  EFI_STATUS Status;
+  EFI_MEMORY_DESCRIPTOR *EfiMemoryMap;
+  EFI_MEMORY_DESCRIPTOR *EfiMemoryMapEnd;
+  EFI_MEMORY_DESCRIPTOR *EfiEntry;
+  EFI_MEMORY_DESCRIPTOR *NextEfiEntry;
+  EFI_MEMORY_DESCRIPTOR TempEfiEntry;
+  UINTN EfiMemoryMapSize;
+  UINTN EfiMapKey;
+  UINTN EfiDescriptorSize;
+  UINT32 EfiDescriptorVersion;
+  UINT64 MemoryBlockLength;
+
+  *Below4GMemoryLimit = 0;
+  *Above4GMemoryLimit = 0;
+
+  //
+  // Get the EFI memory map. The first call with size 0 is expected to
+  // fail with EFI_BUFFER_TOO_SMALL and return the required size.
+  //
+  EfiMemoryMapSize = 0;
+  EfiMemoryMap = NULL;
+  Status = gBS->GetMemoryMap (
+                  &EfiMemoryMapSize,
+                  EfiMemoryMap,
+                  &EfiMapKey,
+                  &EfiDescriptorSize,
+                  &EfiDescriptorVersion
+                  );
+  ASSERT (Status == EFI_BUFFER_TOO_SMALL);
+
+  do {
+    //
+    // Allocate a buffer of the size reported back. The AllocatePool call
+    // itself can grow the memory map, in which case the second
+    // GetMemoryMap fails with EFI_BUFFER_TOO_SMALL again (with an updated
+    // EfiMemoryMapSize) and the loop retries with the larger size.
+    //
+    EfiMemoryMap = (EFI_MEMORY_DESCRIPTOR *) AllocatePool (EfiMemoryMapSize);
+    if (EfiMemoryMap == NULL) {
+      ASSERT (EfiMemoryMap != NULL);
+      return;
+    }
+
+    Status = gBS->GetMemoryMap (
+                    &EfiMemoryMapSize,
+                    EfiMemoryMap,
+                    &EfiMapKey,
+                    &EfiDescriptorSize,
+                    &EfiDescriptorVersion
+                    );
+    if (EFI_ERROR (Status)) {
+      //
+      // Free the too-small buffer before the retry (or before giving up).
+      //
+      FreePool (EfiMemoryMap);
+    }
+  } while (Status == EFI_BUFFER_TOO_SMALL);
+  ASSERT_EFI_ERROR (Status);
+
+  //
+  // Sort memory map from low to high (simple in-place exchange sort over
+  // the descriptor array; descriptors are EfiDescriptorSize apart).
+  //
+  EfiEntry = EfiMemoryMap;
+  NextEfiEntry = NEXT_MEMORY_DESCRIPTOR (EfiEntry, EfiDescriptorSize);
+  EfiMemoryMapEnd = (EFI_MEMORY_DESCRIPTOR *) ((UINT8 *) EfiMemoryMap + EfiMemoryMapSize);
+  while (EfiEntry < EfiMemoryMapEnd) {
+    while (NextEfiEntry < EfiMemoryMapEnd) {
+      if (EfiEntry->PhysicalStart > NextEfiEntry->PhysicalStart) {
+        CopyMem (&TempEfiEntry, EfiEntry, sizeof (EFI_MEMORY_DESCRIPTOR));
+        CopyMem (EfiEntry, NextEfiEntry, sizeof (EFI_MEMORY_DESCRIPTOR));
+        CopyMem (NextEfiEntry, &TempEfiEntry, sizeof (EFI_MEMORY_DESCRIPTOR));
+      }
+
+      NextEfiEntry = NEXT_MEMORY_DESCRIPTOR (NextEfiEntry, EfiDescriptorSize);
+    }
+
+    EfiEntry = NEXT_MEMORY_DESCRIPTOR (EfiEntry, EfiDescriptorSize);
+    NextEfiEntry = NEXT_MEMORY_DESCRIPTOR (EfiEntry, EfiDescriptorSize);
+  }
+
+  //
+  // Scan the sorted map and track the highest end address of DMA-capable
+  // memory below and above 4GiB. Blocks entirely under 1MB are skipped.
+  //
+  DEBUG ((DEBUG_INFO, "MemoryMap:\n"));
+  EfiEntry = EfiMemoryMap;
+  EfiMemoryMapEnd = (EFI_MEMORY_DESCRIPTOR *) ((UINT8 *) EfiMemoryMap + EfiMemoryMapSize);
+  while (EfiEntry < EfiMemoryMapEnd) {
+    MemoryBlockLength = (UINT64) (LShiftU64 (EfiEntry->NumberOfPages, 12));
+    DEBUG ((DEBUG_INFO, "Entry(0x%02x) 0x%016lx - 0x%016lx\n", EfiEntry->Type, EfiEntry->PhysicalStart, EfiEntry->PhysicalStart + MemoryBlockLength));
+    switch (EfiEntry->Type) {
+    case EfiLoaderCode:
+    case EfiLoaderData:
+    case EfiBootServicesCode:
+    case EfiBootServicesData:
+    case EfiConventionalMemory:
+    case EfiRuntimeServicesCode:
+    case EfiRuntimeServicesData:
+    case EfiACPIReclaimMemory:
+    case EfiACPIMemoryNVS:
+    case EfiReservedMemoryType:
+      if ((EfiEntry->PhysicalStart + MemoryBlockLength) <= BASE_1MB) {
+        //
+        // Skip the memory block is under 1MB
+        //
+      } else if (EfiEntry->PhysicalStart >= BASE_4GB) {
+        if (*Above4GMemoryLimit < EfiEntry->PhysicalStart + MemoryBlockLength) {
+          *Above4GMemoryLimit = EfiEntry->PhysicalStart + MemoryBlockLength;
+        }
+      } else {
+        //
+        // NOTE(review): a block that starts below and ends above 4GiB is
+        // accounted entirely to the below-4G limit here — confirm that is
+        // the intended rounding for such straddling blocks.
+        //
+        if (*Below4GMemoryLimit < EfiEntry->PhysicalStart + MemoryBlockLength) {
+          *Below4GMemoryLimit = EfiEntry->PhysicalStart + MemoryBlockLength;
+        }
+      }
+      break;
+    }
+    EfiEntry = NEXT_MEMORY_DESCRIPTOR (EfiEntry, EfiDescriptorSize);
+  }
+
+  FreePool (EfiMemoryMap);
+
+  DEBUG ((DEBUG_INFO, "Result:\n"));
+  DEBUG ((DEBUG_INFO, "Below4GMemoryLimit: 0x%016lx\n", *Below4GMemoryLimit));
+  DEBUG ((DEBUG_INFO, "Above4GMemoryLimit: 0x%016lx\n", *Above4GMemoryLimit));
+
+  return ;
+}
+
+/**
+ The scan bus callback function to always enable page attribute.
+
+ @param[in] Context The context of the callback.
+ @param[in] Segment The segment of the source.
+ @param[in] Bus The bus of the source.
+ @param[in] Device The device of the source.
+ @param[in] Function The function of the source.
+
+ @retval EFI_SUCCESS The VTd entry is updated to always enable all DMA access for the specific device.
+**/
+EFI_STATUS
+EFIAPI
+ScanBusCallbackAlwaysEnablePageAttribute (
+  IN VOID *Context,
+  IN UINT16 Segment,
+  IN UINT8 Bus,
+  IN UINT8 Device,
+  IN UINT8 Function
+  )
+{
+  VTD_SOURCE_ID SourceId;
+
+  //
+  // Build the source ID from the scanned BDF and forward it to the
+  // page-attribute helper; its status is returned to the scanner as-is.
+  //
+  SourceId.Bits.Bus = Bus;
+  SourceId.Bits.Device = Device;
+  SourceId.Bits.Function = Function;
+
+  return AlwaysEnablePageAttribute (Segment, SourceId);
+}
+
+/**
+ Always enable the VTd page attribute for the device in the DeviceScope.
+
+ @param[in] DeviceScope the input device scope data structure
+
+ @retval EFI_SUCCESS The VTd entry is updated to always enable all DMA access for the specific device in the device scope.
+**/
+EFI_STATUS
+AlwaysEnablePageAttributeDeviceScope (
+  IN EDKII_PLATFORM_VTD_DEVICE_SCOPE *DeviceScope
+  )
+{
+  UINT8 Bus;
+  UINT8 Device;
+  UINT8 Function;
+  VTD_SOURCE_ID SourceId;
+  UINT8 SecondaryBusNumber;
+  EFI_STATUS Status;
+
+  Status = GetPciBusDeviceFunction (DeviceScope->SegmentNumber, &DeviceScope->DeviceScope, &Bus, &Device, &Function);
+  if (EFI_ERROR (Status)) {
+    //
+    // Fix: the status was previously ignored, so Bus/Device/Function could
+    // be used with indeterminate values. Bail out instead of programming
+    // VTd entries from a BDF that could not be resolved.
+    //
+    return Status;
+  }
+
+  if (DeviceScope->DeviceScope.Type == EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_BRIDGE) {
+    //
+    // Need scan the bridge and add all devices behind its secondary bus.
+    //
+    SecondaryBusNumber = PciSegmentRead8 (PCI_SEGMENT_LIB_ADDRESS(DeviceScope->SegmentNumber, Bus, Device, Function, PCI_BRIDGE_SECONDARY_BUS_REGISTER_OFFSET));
+    Status = ScanPciBus (NULL, DeviceScope->SegmentNumber, SecondaryBusNumber, ScanBusCallbackAlwaysEnablePageAttribute);
+    return Status;
+  } else {
+    //
+    // Endpoint device: enable the page attribute for this single BDF.
+    //
+    SourceId.Bits.Bus = Bus;
+    SourceId.Bits.Device = Device;
+    SourceId.Bits.Function = Function;
+    Status = AlwaysEnablePageAttribute (DeviceScope->SegmentNumber, SourceId);
+    return Status;
+  }
+}
+
+/**
+ Always enable the VTd page attribute for the device matching DeviceId.
+
+ @param[in] PciDeviceId the input PCI device ID
+
+ @retval EFI_SUCCESS The VTd entry is updated to always enable all DMA access for the specific device matching DeviceId.
+**/
+EFI_STATUS
+AlwaysEnablePageAttributePciDeviceId (
+  IN EDKII_PLATFORM_VTD_PCI_DEVICE_ID *PciDeviceId
+  )
+{
+  UINTN VtdIndex;
+  UINTN PciIndex;
+  PCI_DEVICE_DATA *PciDeviceData;
+  EFI_STATUS Status;
+
+  //
+  // Check every registered PCI device on every VTd engine against the
+  // given ID. Wildcards: 0xFFFF for the 16-bit IDs and 0xFF for the
+  // revision match any value.
+  //
+  for (VtdIndex = 0; VtdIndex < mVtdUnitNumber; VtdIndex++) {
+    for (PciIndex = 0; PciIndex < mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceDataNumber; PciIndex++) {
+      PciDeviceData = &mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceData[PciIndex];
+
+      if (((PciDeviceId->VendorId == 0xFFFF) || (PciDeviceId->VendorId == PciDeviceData->PciDeviceId.VendorId)) &&
+          ((PciDeviceId->DeviceId == 0xFFFF) || (PciDeviceId->DeviceId == PciDeviceData->PciDeviceId.DeviceId)) &&
+          ((PciDeviceId->RevisionId == 0xFF) || (PciDeviceId->RevisionId == PciDeviceData->PciDeviceId.RevisionId)) &&
+          ((PciDeviceId->SubsystemVendorId == 0xFFFF) || (PciDeviceId->SubsystemVendorId == PciDeviceData->PciDeviceId.SubsystemVendorId)) &&
+          ((PciDeviceId->SubsystemDeviceId == 0xFFFF) || (PciDeviceId->SubsystemDeviceId == PciDeviceData->PciDeviceId.SubsystemDeviceId)) ) {
+        Status = AlwaysEnablePageAttribute (mVtdUnitInformation[VtdIndex].Segment, PciDeviceData->PciSourceId);
+        //
+        // Best-effort: a per-device failure does not stop the scan.
+        // NOTE(review): this "continue" is a no-op (it is the last
+        // statement of the loop body) — per-device errors are effectively
+        // ignored; confirm that is intended.
+        //
+        if (EFI_ERROR(Status)) {
+          continue;
+        }
+      }
+    }
+  }
+  return EFI_SUCCESS;
+}
+
+/**
+ Always enable the VTd page attribute for the device.
+
+ @param[in] DeviceInfo the exception device information
+
+ @retval EFI_SUCCESS The VTd entry is updated to always enable all DMA access for the specific device in the device info.
+**/
+EFI_STATUS
+AlwaysEnablePageAttributeExceptionDeviceInfo (
+  IN EDKII_PLATFORM_VTD_EXCEPTION_DEVICE_INFO *DeviceInfo
+  )
+{
+  //
+  // Dispatch on the exception-record type. The type-specific payload
+  // immediately follows the common header, hence (DeviceInfo + 1).
+  //
+  if (DeviceInfo->Type == EDKII_PLATFORM_VTD_EXCEPTION_DEVICE_INFO_TYPE_DEVICE_SCOPE) {
+    return AlwaysEnablePageAttributeDeviceScope ((VOID *)(DeviceInfo + 1));
+  }
+
+  if (DeviceInfo->Type == EDKII_PLATFORM_VTD_EXCEPTION_DEVICE_INFO_TYPE_PCI_DEVICE_ID) {
+    return AlwaysEnablePageAttributePciDeviceId ((VOID *)(DeviceInfo + 1));
+  }
+
+  //
+  // Unknown record type.
+  //
+  return EFI_UNSUPPORTED;
+}
+
+/**
+ Initialize platform VTd policy.
+**/
+VOID
+InitializePlatformVTdPolicy (
+  VOID
+  )
+{
+  EFI_STATUS Status;
+  UINTN DeviceInfoCount;
+  VOID *DeviceInfo;
+  EDKII_PLATFORM_VTD_EXCEPTION_DEVICE_INFO *ThisDeviceInfo;
+  UINTN Index;
+
+  //
+  // The platform policy protocol is optional; silently skip the
+  // exception-device processing when it is not published.
+  //
+  Status = gBS->LocateProtocol (
+                  &gEdkiiPlatformVTdPolicyProtocolGuid,
+                  NULL,
+                  (VOID **)&mPlatformVTdPolicy
+                  );
+  if (!EFI_ERROR(Status)) {
+    DEBUG ((DEBUG_INFO, "InitializePlatformVTdPolicy\n"));
+    Status = mPlatformVTdPolicy->GetExceptionDeviceList (mPlatformVTdPolicy, &DeviceInfoCount, &DeviceInfo);
+    if (!EFI_ERROR(Status)) {
+      //
+      // Walk the variable-length records; each record advances the cursor
+      // by its own Length field. A TYPE_END record terminates the list
+      // early, regardless of DeviceInfoCount.
+      //
+      ThisDeviceInfo = DeviceInfo;
+      for (Index = 0; Index < DeviceInfoCount; Index++) {
+        if (ThisDeviceInfo->Type == EDKII_PLATFORM_VTD_EXCEPTION_DEVICE_INFO_TYPE_END) {
+          break;
+        }
+        AlwaysEnablePageAttributeExceptionDeviceInfo (ThisDeviceInfo);
+        ThisDeviceInfo = (VOID *)((UINTN)ThisDeviceInfo + ThisDeviceInfo->Length);
+      }
+      //
+      // The list buffer is owned by the caller per the protocol contract.
+      //
+      FreePool (DeviceInfo);
+    }
+  }
+}
+
+/**
+ Setup VTd engine.
+**/
+VOID
+SetupVtd (
+  VOID
+  )
+{
+  EFI_STATUS Status;
+  VOID *PciEnumerationComplete;
+  UINTN Index;
+  UINT64 Below4GMemoryLimit;
+  UINT64 Above4GMemoryLimit;
+  VTD_ROOT_TABLE_INFO RootTableInfo;
+
+  //
+  // PCI Enumeration must be done before translation tables can cover all
+  // devices; assert if the completion protocol is not yet installed.
+  //
+  Status = gBS->LocateProtocol (
+                  &gEfiPciEnumerationCompleteProtocolGuid,
+                  NULL,
+                  &PciEnumerationComplete
+                  );
+  ASSERT_EFI_ERROR (Status);
+
+  //
+  // Determine the memory ranges that must be mapped; round the below-4G
+  // limit up to a 256MB boundary.
+  //
+  ReturnUefiMemoryMap (&Below4GMemoryLimit, &Above4GMemoryLimit);
+  Below4GMemoryLimit = ALIGN_VALUE_UP(Below4GMemoryLimit, SIZE_256MB);
+  DEBUG ((DEBUG_INFO, " Adjusted Below4GMemoryLimit: 0x%016lx\n", Below4GMemoryLimit));
+
+  mBelow4GMemoryLimit = Below4GMemoryLimit;
+  mAbove4GMemoryLimit = Above4GMemoryLimit;
+
+  VTdLogAddEvent (VTDLOG_DXE_SETUP_VTD, Below4GMemoryLimit, Above4GMemoryLimit);
+
+  //
+  // 1. setup: parse DRHD structures from the DMAR ACPI table and prepare
+  // each VTd engine's configuration.
+  //
+  DEBUG ((DEBUG_INFO, "ParseDmarAcpiTable\n"));
+  Status = ParseDmarAcpiTableDrhd ();
+  if (EFI_ERROR (Status)) {
+    return;
+  }
+
+  DumpVtdIfError ();
+
+  DEBUG ((DEBUG_INFO, "PrepareVtdConfig\n"));
+  PrepareVtdConfig ();
+
+  //
+  // 2. initialization: build the second-level translation tables.
+  //
+  DEBUG ((DEBUG_INFO, "SetupTranslationTable\n"));
+  Status = SetupTranslationTable ();
+  if (EFI_ERROR (Status)) {
+    return;
+  }
+
+  InitializePlatformVTdPolicy ();
+
+  ParseDmarAcpiTableRmrr ();
+
+  if ((PcdGet8 (PcdVTdPolicyPropertyMask) & BIT2) == 0) {
+    //
+    // Support IOMMU access attribute request recording before DMAR table is installed.
+    // Here is to process the requests.
+    //
+    ProcessRequestedAccessAttribute ();
+  }
+
+  //
+  // Dump each engine's root/context tables and record them in the VTd
+  // event log (data flag 1 = extended table, 0 = legacy table).
+  //
+  for (Index = 0; Index < mVtdUnitNumber; Index++) {
+    DEBUG ((DEBUG_INFO,"VTD Unit %d (Segment: %04x)\n", Index, mVtdUnitInformation[Index].Segment));
+
+    if (mVtdUnitInformation[Index].ExtRootEntryTable != NULL) {
+      VtdLibDumpDmarExtContextEntryTable (NULL, NULL, mVtdUnitInformation[Index].ExtRootEntryTable, mVtdUnitInformation[Index].Is5LevelPaging);
+
+      //
+      // NOTE(review): TableAddress is taken from RootEntryTable here even
+      // though this branch handles ExtRootEntryTable — confirm whether
+      // ExtRootEntryTable was intended.
+      //
+      RootTableInfo.BaseAddress = mVtdUnitInformation[Index].VtdUnitBaseAddress;
+      RootTableInfo.TableAddress = (UINT64) (UINTN) mVtdUnitInformation[Index].RootEntryTable;
+      RootTableInfo.Is5LevelPaging = mVtdUnitInformation[Index].Is5LevelPaging;
+      VTdLogAddDataEvent (VTDLOG_DXE_ROOT_TABLE, 1, &RootTableInfo, sizeof (VTD_ROOT_TABLE_INFO));
+    }
+
+    if (mVtdUnitInformation[Index].RootEntryTable != NULL) {
+      VtdLibDumpDmarContextEntryTable (NULL, NULL, mVtdUnitInformation[Index].RootEntryTable, mVtdUnitInformation[Index].Is5LevelPaging);
+
+      RootTableInfo.BaseAddress = mVtdUnitInformation[Index].VtdUnitBaseAddress;
+      RootTableInfo.TableAddress = (UINT64) (UINTN) mVtdUnitInformation[Index].RootEntryTable;
+      RootTableInfo.Is5LevelPaging = mVtdUnitInformation[Index].Is5LevelPaging;
+      VTdLogAddDataEvent (VTDLOG_DXE_ROOT_TABLE, 0, &RootTableInfo, sizeof (VTD_ROOT_TABLE_INFO));
+    }
+  }
+
+  //
+  // 3. enable: turn on DMA remapping on every engine.
+  //
+  DEBUG ((DEBUG_INFO, "EnableDmar\n"));
+  Status = EnableDmar ();
+  if (EFI_ERROR (Status)) {
+    return;
+  }
+  DEBUG ((DEBUG_INFO, "DumpVtdRegs\n"));
+  DumpVtdRegsAll ();
+}
+
+/**
+ Notification function of ACPI Table change.
+
+ This is a notification function registered on ACPI Table change event.
+
+ @param Event Event whose notification function is being invoked.
+ @param Context Pointer to the notification function's context.
+
+**/
+VOID
+EFIAPI
+AcpiNotificationFunc (
+  IN EFI_EVENT Event,
+  IN VOID *Context
+  )
+{
+  EFI_STATUS Status;
+
+  Status = GetDmarAcpiTable ();
+  if (!EFI_ERROR (Status)) {
+    //
+    // DMAR table found: bring up the VTd engines and unregister this
+    // notification.
+    //
+    SetupVtd ();
+    gBS->CloseEvent (Event);
+    return;
+  }
+
+  //
+  // EFI_ALREADY_STARTED means the table was processed by a previous
+  // notification; the event is no longer needed. Any other error leaves
+  // the event open so a later ACPI table installation can retry.
+  //
+  if (Status == EFI_ALREADY_STARTED) {
+    gBS->CloseEvent (Event);
+  }
+}
+
+/**
+ Exit boot service callback function.
+
+ @param[in] Event The event handle.
+ @param[in] Context The event content.
+**/
+VOID
+EFIAPI
+OnExitBootServices (
+  IN EFI_EVENT Event,
+  IN VOID *Context
+  )
+{
+  UINTN VtdIndex;
+
+  DEBUG ((DEBUG_INFO, "Vtd OnExitBootServices\n"));
+
+  DumpVtdRegsAll ();
+
+  //
+  // Flush write buffers and invalidate context cache and IOTLB on every
+  // engine so no stale translations survive into the OS handoff.
+  //
+  DEBUG ((DEBUG_INFO, "Invalidate all\n"));
+  for (VtdIndex = 0; VtdIndex < mVtdUnitNumber; VtdIndex++) {
+    VtdLibFlushWriteBuffer (mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress);
+
+    InvalidateContextCache (VtdIndex);
+
+    InvalidateIOTLB (VtdIndex);
+  }
+
+  //
+  // BIT1 of the policy PCD keeps DMAR enabled across ExitBootServices;
+  // when clear, translation is disabled before the OS takes over.
+  //
+  if ((PcdGet8(PcdVTdPolicyPropertyMask) & BIT1) == 0) {
+    DisableDmar ();
+    DumpVtdRegsAll ();
+  }
+}
+
+/**
+ Legacy boot callback function.
+
+ @param[in] Event The event handle.
+ @param[in] Context The event content.
+**/
+VOID
+EFIAPI
+OnLegacyBoot (
+  EFI_EVENT Event,
+  VOID *Context
+  )
+{
+  //
+  // Legacy OSes have no VTd awareness: unconditionally disable DMA
+  // remapping. Registers are dumped before and after for diagnostics.
+  //
+  DEBUG ((DEBUG_INFO, "Vtd OnLegacyBoot\n"));
+  DumpVtdRegsAll ();
+  DisableDmar ();
+  DumpVtdRegsAll ();
+}
+
+/**
+ Initialize DMA protection.
+**/
+VOID
+InitializeDmaProtection (
+  VOID
+  )
+{
+  EFI_STATUS Status;
+  EFI_EVENT ExitBootServicesEvent;
+  EFI_EVENT LegacyBootEvent;
+  EFI_EVENT EventAcpi10;
+  EFI_EVENT EventAcpi20;
+
+  //
+  // Register for both ACPI 1.0 and 2.0 table installation events; the
+  // DMAR table may arrive via either GUID.
+  //
+  Status = gBS->CreateEventEx (
+                  EVT_NOTIFY_SIGNAL,
+                  VTD_TPL_LEVEL,
+                  AcpiNotificationFunc,
+                  NULL,
+                  &gEfiAcpi10TableGuid,
+                  &EventAcpi10
+                  );
+  ASSERT_EFI_ERROR (Status);
+
+  Status = gBS->CreateEventEx (
+                  EVT_NOTIFY_SIGNAL,
+                  VTD_TPL_LEVEL,
+                  AcpiNotificationFunc,
+                  NULL,
+                  &gEfiAcpi20TableGuid,
+                  &EventAcpi20
+                  );
+  ASSERT_EFI_ERROR (Status);
+
+  //
+  // Signal the events initially for the case
+  // that DMAR table has been installed.
+  //
+  gBS->SignalEvent (EventAcpi20);
+  gBS->SignalEvent (EventAcpi10);
+
+  //
+  // Quiesce/disable VTd at the two boot handoff points: native UEFI boot
+  // (ExitBootServices) and legacy boot.
+  //
+  Status = gBS->CreateEventEx (
+                  EVT_NOTIFY_SIGNAL,
+                  TPL_CALLBACK,
+                  OnExitBootServices,
+                  NULL,
+                  &gEfiEventExitBootServicesGuid,
+                  &ExitBootServicesEvent
+                  );
+  ASSERT_EFI_ERROR (Status);
+
+  Status = EfiCreateEventLegacyBootEx (
+             TPL_CALLBACK,
+             OnLegacyBoot,
+             NULL,
+             &LegacyBootEvent
+             );
+  ASSERT_EFI_ERROR (Status);
+
+  return ;
+}
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmaProtection.h b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmaProtection.h
new file mode 100644
index 000000000..4b2f451b1
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmaProtection.h
@@ -0,0 +1,668 @@
+/** @file
+
+ Copyright (c) 2017 - 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef _DMAR_PROTECTION_H_
+#define _DMAR_PROTECTION_H_
+
+#include <Uefi.h>
+#include <PiDxe.h>
+
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/UefiBootServicesTableLib.h>
+#include <Library/IoLib.h>
+#include <Library/PciSegmentLib.h>
+#include <Library/DebugLib.h>
+#include <Library/UefiLib.h>
+#include <Library/CacheMaintenanceLib.h>
+#include <Library/PerformanceLib.h>
+#include <Library/PrintLib.h>
+#include <Library/ReportStatusCodeLib.h>
+#include <Library/HobLib.h>
+
+#include <Guid/EventGroup.h>
+#include <Guid/Acpi.h>
+
+#include <Protocol/VtdLog.h>
+#include <Protocol/DxeSmmReadyToLock.h>
+#include <Protocol/PciRootBridgeIo.h>
+#include <Protocol/PciIo.h>
+#include <Protocol/PciEnumerationComplete.h>
+#include <Protocol/PlatformVtdPolicy.h>
+#include <Protocol/IoMmu.h>
+#include <Protocol/PciRootBridgeIo.h>
+
+#include <IndustryStandard/Pci.h>
+#include <IndustryStandard/DmaRemappingReportingTable.h>
+#include <IndustryStandard/Vtd.h>
+
+#include <Library/IntelVTdPeiDxeLib.h>
+
+#define VTD_64BITS_ADDRESS(Lo, Hi) (LShiftU64 (Lo, 12) | LShiftU64 (Hi, 32))
+
+#define ALIGN_VALUE_UP(Value, Alignment) (((Value) + (Alignment) - 1) & (~((Alignment) - 1)))
+#define ALIGN_VALUE_LOW(Value, Alignment) ((Value) & (~((Alignment) - 1)))
+
+#define VTD_TPL_LEVEL TPL_NOTIFY
+
+//
+// Use 256-bit descriptor
+// Queue size is 128.
+//
+#define VTD_QUEUED_INVALIDATION_DESCRIPTOR_WIDTH 1
+#define VTD_INVALIDATION_QUEUE_SIZE 0
+
+//
+// This is the initial max PCI DATA number.
+// The number may be enlarged later.
+//
+#define MAX_VTD_PCI_DATA_NUMBER 0x100
+
+//
+// Per-engine state for one VTd (DRHD) unit.
+//
+typedef struct {
+  // MMIO base address of the engine's register block.
+  UINTN VtdUnitBaseAddress;
+  // PCI segment this engine covers.
+  UINT16 Segment;
+  // Cached VER/CAP/ECAP register snapshots.
+  VTD_VER_REG VerReg;
+  VTD_CAP_REG CapReg;
+  VTD_ECAP_REG ECapReg;
+  // Legacy and extended root entry tables (one of them is in use).
+  VTD_ROOT_ENTRY *RootEntryTable;
+  VTD_EXT_ROOT_ENTRY *ExtRootEntryTable;
+  // Pre-built second-level paging structure shared by fixed mappings.
+  VTD_SECOND_LEVEL_PAGING_ENTRY *FixedSecondLevelPagingEntry;
+  // Dirty flags: pending context/page-table changes that need cache
+  // invalidation before they take effect.
+  BOOLEAN HasDirtyContext;
+  BOOLEAN HasDirtyPages;
+  // Devices discovered under this engine.
+  PCI_DEVICE_INFORMATION *PciDeviceInfo;
+  // TRUE when 5-level second-stage paging is used.
+  BOOLEAN Is5LevelPaging;
+  // Queued-invalidation state: non-zero when QI is active, plus the
+  // descriptor ring buffer and its size.
+  UINT8 EnableQueuedInvalidation;
+  VOID *QiDescBuffer;
+  UINTN QiDescBufferSize;
+} VTD_UNIT_INFORMATION;
+
+//
+// This is the initial max ACCESS request.
+// The number may be enlarged later.
+//
+#define MAX_VTD_ACCESS_REQUEST 0x100
+
+//
+// One recorded IOMMU access-attribute request, queued while the DMAR
+// table is not yet installed and replayed afterwards
+// (see RequestAccessAttribute / ProcessRequestedAccessAttribute).
+//
+typedef struct {
+  // Segment + SourceId identify the VTd engine and context entry.
+  UINT16 Segment;
+  VTD_SOURCE_ID SourceId;
+  // DMA memory range and requested access (read/write) bits.
+  UINT64 BaseAddress;
+  UINT64 Length;
+  UINT64 IoMmuAccess;
+} VTD_ACCESS_REQUEST;
+
+
+/**
+ The scan bus callback function.
+
+ It is called in PCI bus scan for each PCI device under the bus.
+
+ @param[in] Context The context of the callback.
+ @param[in] Segment The segment of the source.
+ @param[in] Bus The bus of the source.
+ @param[in] Device The device of the source.
+ @param[in] Function The function of the source.
+
+ @retval EFI_SUCCESS The specific PCI device is processed in the callback.
+**/
+typedef
+EFI_STATUS
+(EFIAPI *SCAN_BUS_FUNC_CALLBACK_FUNC) (
+ IN VOID *Context,
+ IN UINT16 Segment,
+ IN UINT8 Bus,
+ IN UINT8 Device,
+ IN UINT8 Function
+ );
+
+extern EFI_ACPI_DMAR_HEADER *mAcpiDmarTable;
+
+extern UINTN mVtdUnitNumber;
+extern VTD_UNIT_INFORMATION *mVtdUnitInformation;
+
+extern UINT64 mBelow4GMemoryLimit;
+extern UINT64 mAbove4GMemoryLimit;
+
+extern EDKII_PLATFORM_VTD_POLICY_PROTOCOL *mPlatformVTdPolicy;
+
+/**
+ Prepare VTD configuration.
+**/
+VOID
+PrepareVtdConfig (
+ VOID
+ );
+
+/**
+ Setup VTd translation table.
+
+ @retval EFI_SUCCESS Setup translation table successfully.
+ @retval EFI_OUT_OF_RESOURCE Setup translation table fail.
+**/
+EFI_STATUS
+SetupTranslationTable (
+ VOID
+ );
+
+/**
+ Enable DMAR translation.
+
+ @retval EFI_SUCCESS DMAR translation is enabled.
+ @retval EFI_DEVICE_ERROR DMAR translation is not enabled.
+**/
+EFI_STATUS
+EnableDmar (
+ VOID
+ );
+
+/**
+ Disable DMAR translation.
+
+ @retval EFI_SUCCESS DMAR translation is disabled.
+ @retval EFI_DEVICE_ERROR DMAR translation is not disabled.
+**/
+EFI_STATUS
+DisableDmar (
+ VOID
+ );
+
+/**
+ Prepare cache invalidation interface.
+
+ @param[in] VtdIndex The index used to identify a VTd engine.
+
+ @retval EFI_SUCCESS The operation was successful.
+ @retval EFI_UNSUPPORTED Invalidation method is not supported.
+ @retval EFI_OUT_OF_RESOURCES A memory allocation failed.
+**/
+EFI_STATUS
+PerpareCacheInvalidationInterface (
+ IN UINTN VtdIndex
+ );
+
+/**
+ Invalidate VTd context cache.
+
+ @param[in] VtdIndex The index used to identify a VTd engine.
+**/
+EFI_STATUS
+InvalidateContextCache (
+ IN UINTN VtdIndex
+ );
+
+/**
+ Invalidate VTd IOTLB.
+
+ @param[in] VtdIndex The index used to identify a VTd engine.
+**/
+EFI_STATUS
+InvalidateIOTLB (
+ IN UINTN VtdIndex
+ );
+
+/**
+ Invalid VTd global IOTLB.
+
+ @param[in] VtdIndex The index of VTd engine.
+
+ @retval EFI_SUCCESS VTd global IOTLB is invalidated.
+ @retval EFI_DEVICE_ERROR VTd global IOTLB is not invalidated.
+**/
+EFI_STATUS
+InvalidateVtdIOTLBGlobal (
+ IN UINTN VtdIndex
+ );
+
+/**
+ Dump VTd registers.
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+**/
+VOID
+DumpVtdRegs (
+ IN UINTN VtdUnitBaseAddress
+ );
+
+/**
+ Dump VTd registers for all VTd engine.
+**/
+VOID
+DumpVtdRegsAll (
+ VOID
+ );
+
+/**
+ Dump VTd version registers.
+
+ @param[in] VerReg The version register.
+**/
+VOID
+DumpVtdVerRegs (
+ IN VTD_VER_REG *VerReg
+ );
+
+/**
+ Dump VTd capability registers.
+
+ @param[in] CapReg The capability register.
+**/
+VOID
+DumpVtdCapRegs (
+ IN VTD_CAP_REG *CapReg
+ );
+
+/**
+ Dump VTd extended capability registers.
+
+ @param[in] ECapReg The extended capability register.
+**/
+VOID
+DumpVtdECapRegs (
+ IN VTD_ECAP_REG *ECapReg
+ );
+
+/**
+ Register PCI device to VTd engine.
+
+ @param[in] VtdIndex The index of VTd engine.
+ @param[in] Segment The segment of the source.
+ @param[in] SourceId The SourceId of the source.
+ @param[in] DeviceType The DMAR device scope type.
+ @param[in] CheckExist TRUE: ERROR will be returned if the PCI device is already registered.
+ FALSE: SUCCESS will be returned if the PCI device is registered.
+
+ @retval EFI_SUCCESS The PCI device is registered.
+ @retval EFI_OUT_OF_RESOURCES No enough resource to register a new PCI device.
+ @retval EFI_ALREADY_STARTED The device is already registered.
+**/
+EFI_STATUS
+RegisterPciDevice (
+ IN UINTN VtdIndex,
+ IN UINT16 Segment,
+ IN VTD_SOURCE_ID SourceId,
+ IN UINT8 DeviceType,
+ IN BOOLEAN CheckExist
+ );
+
+/**
+ The scan bus callback function to always enable page attribute.
+
+ @param[in] Context The context of the callback.
+ @param[in] Segment The segment of the source.
+ @param[in] Bus The bus of the source.
+ @param[in] Device The device of the source.
+ @param[in] Function The function of the source.
+
+ @retval EFI_SUCCESS The VTd entry is updated to always enable all DMA access for the specific device.
+**/
+EFI_STATUS
+EFIAPI
+ScanBusCallbackRegisterPciDevice (
+ IN VOID *Context,
+ IN UINT16 Segment,
+ IN UINT8 Bus,
+ IN UINT8 Device,
+ IN UINT8 Function
+ );
+
+/**
+ Scan PCI bus and invoke callback function for each PCI devices under the bus.
+
+ @param[in] Context The context of the callback function.
+ @param[in] Segment The segment of the source.
+ @param[in] Bus The bus of the source.
+ @param[in] Callback The callback function in PCI scan.
+
+  @retval EFI_SUCCESS The PCI devices under the bus are scanned.
+**/
+EFI_STATUS
+ScanPciBus (
+ IN VOID *Context,
+ IN UINT16 Segment,
+ IN UINT8 Bus,
+ IN SCAN_BUS_FUNC_CALLBACK_FUNC Callback
+ );
+
+/**
+ Scan PCI bus and invoke callback function for each PCI devices under all root bus.
+
+ @param[in] Context The context of the callback function.
+ @param[in] Segment The segment of the source.
+ @param[in] Callback The callback function in PCI scan.
+
+  @retval EFI_SUCCESS The PCI devices under all root buses are scanned.
+**/
+EFI_STATUS
+ScanAllPciBus (
+ IN VOID *Context,
+ IN UINT16 Segment,
+ IN SCAN_BUS_FUNC_CALLBACK_FUNC Callback
+ );
+
+/**
+ Find the VTd index by the Segment and SourceId.
+
+ @param[in] Segment The segment of the source.
+ @param[in] SourceId The SourceId of the source.
+ @param[out] ExtContextEntry The ExtContextEntry of the source.
+ @param[out] ContextEntry The ContextEntry of the source.
+
+ @return The index of the VTd engine.
+ @retval (UINTN)-1 The VTd engine is not found.
+**/
+UINTN
+FindVtdIndexByPciDevice (
+ IN UINT16 Segment,
+ IN VTD_SOURCE_ID SourceId,
+ OUT VTD_EXT_CONTEXT_ENTRY **ExtContextEntry,
+ OUT VTD_CONTEXT_ENTRY **ContextEntry
+ );
+
+/**
+ Get the DMAR ACPI table.
+
+ @retval EFI_SUCCESS The DMAR ACPI table is got.
+ @retval EFI_ALREADY_STARTED The DMAR ACPI table has been got previously.
+ @retval EFI_NOT_FOUND The DMAR ACPI table is not found.
+**/
+EFI_STATUS
+GetDmarAcpiTable (
+ VOID
+ );
+
+/**
+ Parse DMAR DRHD table.
+
+ @return EFI_SUCCESS The DMAR DRHD table is parsed.
+**/
+EFI_STATUS
+ParseDmarAcpiTableDrhd (
+ VOID
+ );
+
+/**
+ Parse DMAR RMRR table.
+
+ @return EFI_SUCCESS The DMAR RMRR table is parsed.
+**/
+EFI_STATUS
+ParseDmarAcpiTableRmrr (
+ VOID
+ );
+
+/**
+ Set VTd attribute for a system memory.
+
+ @param[in] VtdIndex The index used to identify a VTd engine.
+ @param[in] DomainIdentifier The domain ID of the source.
+ @param[in] SecondLevelPagingEntry The second level paging entry in VTd table for the device.
+ @param[in] BaseAddress The base of device memory address to be used as the DMA memory.
+ @param[in] Length The length of device memory address to be used as the DMA memory.
+ @param[in] IoMmuAccess The IOMMU access.
+
+ @retval EFI_SUCCESS The IoMmuAccess is set for the memory range specified by BaseAddress and Length.
+ @retval EFI_INVALID_PARAMETER BaseAddress is not IoMmu Page size aligned.
+ @retval EFI_INVALID_PARAMETER Length is not IoMmu Page size aligned.
+ @retval EFI_INVALID_PARAMETER Length is 0.
+ @retval EFI_INVALID_PARAMETER IoMmuAccess specified an illegal combination of access.
+ @retval EFI_UNSUPPORTED The bit mask of IoMmuAccess is not supported by the IOMMU.
+ @retval EFI_UNSUPPORTED The IOMMU does not support the memory range specified by BaseAddress and Length.
+ @retval EFI_OUT_OF_RESOURCES There are not enough resources available to modify the IOMMU access.
+ @retval EFI_DEVICE_ERROR The IOMMU device reported an error while attempting the operation.
+**/
+EFI_STATUS
+SetPageAttribute (
+ IN UINTN VtdIndex,
+ IN UINT16 DomainIdentifier,
+ IN VTD_SECOND_LEVEL_PAGING_ENTRY *SecondLevelPagingEntry,
+ IN UINT64 BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 IoMmuAccess
+ );
+
+/**
+ Set VTd attribute for a system memory.
+
+ @param[in] Segment The Segment used to identify a VTd engine.
+ @param[in] SourceId The SourceId used to identify a VTd engine and table entry.
+ @param[in] BaseAddress The base of device memory address to be used as the DMA memory.
+ @param[in] Length The length of device memory address to be used as the DMA memory.
+ @param[in] IoMmuAccess The IOMMU access.
+
+ @retval EFI_SUCCESS The IoMmuAccess is set for the memory range specified by BaseAddress and Length.
+ @retval EFI_INVALID_PARAMETER BaseAddress is not IoMmu Page size aligned.
+ @retval EFI_INVALID_PARAMETER Length is not IoMmu Page size aligned.
+ @retval EFI_INVALID_PARAMETER Length is 0.
+ @retval EFI_INVALID_PARAMETER IoMmuAccess specified an illegal combination of access.
+ @retval EFI_UNSUPPORTED The bit mask of IoMmuAccess is not supported by the IOMMU.
+ @retval EFI_UNSUPPORTED The IOMMU does not support the memory range specified by BaseAddress and Length.
+ @retval EFI_OUT_OF_RESOURCES There are not enough resources available to modify the IOMMU access.
+ @retval EFI_DEVICE_ERROR The IOMMU device reported an error while attempting the operation.
+**/
+EFI_STATUS
+SetAccessAttribute (
+ IN UINT16 Segment,
+ IN VTD_SOURCE_ID SourceId,
+ IN UINT64 BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 IoMmuAccess
+ );
+
+/**
+ Return the index of PCI data.
+
+ @param[in] VtdIndex The index used to identify a VTd engine.
+ @param[in] Segment The Segment used to identify a VTd engine.
+ @param[in] SourceId The SourceId used to identify a VTd engine and table entry.
+
+ @return The index of the PCI data.
+ @retval (UINTN)-1 The PCI data is not found.
+**/
+UINTN
+GetPciDataIndex (
+ IN UINTN VtdIndex,
+ IN UINT16 Segment,
+ IN VTD_SOURCE_ID SourceId
+ );
+
+/**
+ Dump VTd registers if there is error.
+**/
+VOID
+DumpVtdIfError (
+ VOID
+ );
+
+/**
+ Initialize platform VTd policy.
+**/
+VOID
+InitializePlatformVTdPolicy (
+ VOID
+ );
+
+/**
+ Always enable the VTd page attribute for the device.
+
+ @param[in] Segment The Segment used to identify a VTd engine.
+ @param[in] SourceId The SourceId used to identify a VTd engine and table entry.
+
+ @retval EFI_SUCCESS The VTd entry is updated to always enable all DMA access for the specific device.
+**/
+EFI_STATUS
+AlwaysEnablePageAttribute (
+ IN UINT16 Segment,
+ IN VTD_SOURCE_ID SourceId
+ );
+
+/**
+ Convert the DeviceHandle to SourceId and Segment.
+
+ @param[in] DeviceHandle The device who initiates the DMA access request.
+ @param[out] Segment The Segment used to identify a VTd engine.
+ @param[out] SourceId The SourceId used to identify a VTd engine and table entry.
+
+ @retval EFI_SUCCESS The Segment and SourceId are returned.
+ @retval EFI_INVALID_PARAMETER DeviceHandle is an invalid handle.
+ @retval EFI_UNSUPPORTED DeviceHandle is unknown by the IOMMU.
+**/
+EFI_STATUS
+DeviceHandleToSourceId (
+ IN EFI_HANDLE DeviceHandle,
+ OUT UINT16 *Segment,
+ OUT VTD_SOURCE_ID *SourceId
+ );
+
+/**
+ Get device information from mapping.
+
+ @param[in] Mapping The mapping.
+ @param[out] DeviceAddress The device address of the mapping.
+ @param[out] NumberOfPages The number of pages of the mapping.
+
+ @retval EFI_SUCCESS The device information is returned.
+ @retval EFI_INVALID_PARAMETER The mapping is invalid.
+**/
+EFI_STATUS
+GetDeviceInfoFromMapping (
+ IN VOID *Mapping,
+ OUT EFI_PHYSICAL_ADDRESS *DeviceAddress,
+ OUT UINTN *NumberOfPages
+ );
+
+/**
+ Initialize DMA protection.
+**/
+VOID
+InitializeDmaProtection (
+ VOID
+ );
+
+/**
+ Allocate zero pages.
+
+ @param[in] Pages the number of pages.
+
+ @return the page address.
+ @retval NULL No resource to allocate pages.
+**/
+VOID *
+EFIAPI
+AllocateZeroPages (
+ IN UINTN Pages
+ );
+
+/**
+ Flush VTD page table and context table memory.
+
+ This action is to make sure the IOMMU engine can get final data in memory.
+
+ @param[in] VtdIndex The index used to identify a VTd engine.
+ @param[in] Base The base address of memory to be flushed.
+ @param[in] Size The size of memory in bytes to be flushed.
+**/
+VOID
+FlushPageTableMemory (
+ IN UINTN VtdIndex,
+ IN UINTN Base,
+ IN UINTN Size
+ );
+
+/**
+ Get PCI device information from DMAR DevScopeEntry.
+
+ @param[in] Segment The segment number.
+ @param[in] DmarDevScopeEntry DMAR DevScopeEntry
+ @param[out] Bus The bus number.
+ @param[out] Device The device number.
+ @param[out] Function The function number.
+
+ @retval EFI_SUCCESS The PCI device information is returned.
+**/
+EFI_STATUS
+GetPciBusDeviceFunction (
+ IN UINT16 Segment,
+ IN EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *DmarDevScopeEntry,
+ OUT UINT8 *Bus,
+ OUT UINT8 *Device,
+ OUT UINT8 *Function
+ );
+
+/**
+ Append VTd Access Request to global.
+
+ @param[in] Segment The Segment used to identify a VTd engine.
+ @param[in] SourceId The SourceId used to identify a VTd engine and table entry.
+ @param[in] BaseAddress The base of device memory address to be used as the DMA memory.
+ @param[in] Length The length of device memory address to be used as the DMA memory.
+ @param[in] IoMmuAccess The IOMMU access.
+
+ @retval EFI_SUCCESS The IoMmuAccess is set for the memory range specified by BaseAddress and Length.
+ @retval EFI_INVALID_PARAMETER BaseAddress is not IoMmu Page size aligned.
+ @retval EFI_INVALID_PARAMETER Length is not IoMmu Page size aligned.
+ @retval EFI_INVALID_PARAMETER Length is 0.
+ @retval EFI_INVALID_PARAMETER IoMmuAccess specified an illegal combination of access.
+ @retval EFI_UNSUPPORTED The bit mask of IoMmuAccess is not supported by the IOMMU.
+ @retval EFI_UNSUPPORTED The IOMMU does not support the memory range specified by BaseAddress and Length.
+ @retval EFI_OUT_OF_RESOURCES There are not enough resources available to modify the IOMMU access.
+ @retval EFI_DEVICE_ERROR The IOMMU device reported an error while attempting the operation.
+
+**/
+EFI_STATUS
+RequestAccessAttribute (
+ IN UINT16 Segment,
+ IN VTD_SOURCE_ID SourceId,
+ IN UINT64 BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 IoMmuAccess
+ );
+
+/**
+ Add a new VTd log event.
+
+ @param[in] EventType Event type
+ @param[in] Data1 First parameter
+ @param[in] Data2 Second parameter
+
+**/
+VOID
+EFIAPI
+VTdLogAddEvent (
+ IN CONST VTDLOG_EVENT_TYPE EventType,
+ IN CONST UINT64 Data1,
+ IN CONST UINT64 Data2
+ );
+
+/**
+ Add a new VTd log event with data.
+
+ @param[in] EventType Event type
+ @param[in] Param parameter
+ @param[in] Data Data
+ @param[in] DataSize Data size
+
+**/
+VOID
+EFIAPI
+VTdLogAddDataEvent (
+ IN CONST VTDLOG_EVENT_TYPE EventType,
+ IN CONST UINT64 Param,
+ IN CONST VOID *Data,
+ IN CONST UINT32 DataSize
+ );
+
+/**
+ Initializes the VTd Log.
+
+**/
+VOID
+EFIAPI
+VTdLogInitialize(
+ VOID
+ );
+
+#endif
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmarAcpiTable.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmarAcpiTable.c
new file mode 100644
index 000000000..21f559983
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/DmarAcpiTable.c
@@ -0,0 +1,398 @@
+/** @file
+
+ Copyright (c) 2017 - 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "DmaProtection.h"
+
+#pragma pack(1)
+
+typedef struct {
+ EFI_ACPI_DESCRIPTION_HEADER Header;
+ UINT32 Entry;
+} RSDT_TABLE;
+
+typedef struct {
+ EFI_ACPI_DESCRIPTION_HEADER Header;
+ UINT64 Entry;
+} XSDT_TABLE;
+
+#pragma pack()
+
+EFI_ACPI_DMAR_HEADER *mAcpiDmarTable = NULL;
+
+/**
+  Dump DMAR ACPI table.
+
+  Dumps the cached DMAR table to the debug output and records the raw
+  table bytes in the VTd event log.
+
+  NOTE(review): dereferences mAcpiDmarTable without a NULL check - only
+  call after GetDmarAcpiTable() has succeeded.
+**/
+VOID
+VtdDumpDmarTable (
+ VOID
+ )
+{
+ VtdLibDumpAcpiDmar (NULL, NULL, (EFI_ACPI_DMAR_HEADER *) (UINTN) mAcpiDmarTable);
+
+ VTdLogAddDataEvent (VTDLOG_DXE_DMAR_TABLE, mAcpiDmarTable->Header.Length, (VOID *)mAcpiDmarTable, mAcpiDmarTable->Header.Length);
+}
+
+/**
+  Get PCI device information from DMAR DevScopeEntry.
+
+  Walks the EFI_ACPI_DMAR_PCI_PATH list that follows the device scope
+  entry header, starting at the entry's StartBusNumber. For PCI endpoint
+  and PCI-PCI bridge entries, each intermediate hop is resolved by reading
+  the bridge's secondary bus number from PCI configuration space, so the
+  returned Bus/Device/Function identify the final device in the path.
+
+  @param[in] Segment The segment number.
+  @param[in] DmarDevScopeEntry DMAR DevScopeEntry
+  @param[out] Bus The bus number.
+  @param[out] Device The device number.
+  @param[out] Function The function number.
+
+  @retval EFI_SUCCESS The PCI device information is returned.
+**/
+EFI_STATUS
+GetPciBusDeviceFunction (
+ IN UINT16 Segment,
+ IN EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *DmarDevScopeEntry,
+ OUT UINT8 *Bus,
+ OUT UINT8 *Device,
+ OUT UINT8 *Function
+ )
+{
+ EFI_ACPI_DMAR_PCI_PATH *DmarPciPath;
+ UINT8 MyBus;
+ UINT8 MyDevice;
+ UINT8 MyFunction;
+
+ // First path element immediately follows the device scope entry header.
+ DmarPciPath = (EFI_ACPI_DMAR_PCI_PATH *)((UINTN)(DmarDevScopeEntry + 1));
+ MyBus = DmarDevScopeEntry->StartBusNumber;
+ MyDevice = DmarPciPath->Device;
+ MyFunction = DmarPciPath->Function;
+
+ switch (DmarDevScopeEntry->Type) {
+ case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_ENDPOINT:
+ case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_BRIDGE:
+ //
+ // Follow each remaining path element through the bridge hierarchy:
+ // the current (Bus, Device, Function) names a bridge whose secondary
+ // bus number becomes the bus of the next path element.
+ //
+ while ((UINTN)DmarPciPath + sizeof(EFI_ACPI_DMAR_PCI_PATH) < (UINTN)DmarDevScopeEntry + DmarDevScopeEntry->Length) {
+ MyBus = PciSegmentRead8 (PCI_SEGMENT_LIB_ADDRESS(Segment, MyBus, MyDevice, MyFunction, PCI_BRIDGE_SECONDARY_BUS_REGISTER_OFFSET));
+ DmarPciPath ++;
+ MyDevice = DmarPciPath->Device;
+ MyFunction = DmarPciPath->Function;
+ }
+ break;
+ case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_IOAPIC:
+ case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_MSI_CAPABLE_HPET:
+ case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_ACPI_NAMESPACE_DEVICE:
+ // Single path element only; the values read above are already final.
+ break;
+ }
+
+ *Bus = MyBus;
+ *Device = MyDevice;
+ *Function = MyFunction;
+
+ return EFI_SUCCESS;
+}
+
+/**
+  Process DMAR DRHD table.
+
+  Records the segment number and register base address of the VTd engine
+  described by this DRHD structure into mVtdUnitInformation[VtdIndex],
+  then registers every PCI device within the engine's scope: all devices
+  on the segment when INCLUDE_PCI_ALL is set, otherwise the devices named
+  by the device scope entries (recursing into secondary buses for PCI-PCI
+  bridge entries).
+
+  @param[in] VtdIndex The index of VTd engine.
+  @param[in] DmarDrhd The DRHD table.
+
+  @retval EFI_SUCCESS The DRHD table is processed.
+  @retval EFI_OUT_OF_RESOURCES The PCI device information buffer could not be allocated.
+**/
+EFI_STATUS
+ProcessDrhd (
+ IN UINTN VtdIndex,
+ IN EFI_ACPI_DMAR_DRHD_HEADER *DmarDrhd
+ )
+{
+ EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *DmarDevScopeEntry;
+ UINT8 Bus;
+ UINT8 Device;
+ UINT8 Function;
+ UINT8 SecondaryBusNumber;
+ EFI_STATUS Status;
+ VTD_SOURCE_ID SourceId;
+
+ //
+ // One PCI_DEVICE_INFORMATION header plus an inline array of up to
+ // MAX_VTD_PCI_DATA_NUMBER PCI_DEVICE_DATA entries.
+ // NOTE(review): this buffer is not freed on the error returns below -
+ // confirm callers treat those paths as fatal.
+ //
+ mVtdUnitInformation[VtdIndex].PciDeviceInfo = AllocateZeroPool (sizeof (PCI_DEVICE_INFORMATION) + sizeof (PCI_DEVICE_DATA) * MAX_VTD_PCI_DATA_NUMBER);
+ if (mVtdUnitInformation[VtdIndex].PciDeviceInfo == NULL) {
+ ASSERT (FALSE);
+ return EFI_OUT_OF_RESOURCES;
+ }
+
+ mVtdUnitInformation[VtdIndex].Segment = DmarDrhd->SegmentNumber;
+ mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress = (UINTN)DmarDrhd->RegisterBaseAddress;
+ DEBUG ((DEBUG_INFO," VTD (%d) BaseAddress - 0x%016lx\n", VtdIndex, DmarDrhd->RegisterBaseAddress));
+
+ mVtdUnitInformation[VtdIndex].PciDeviceInfo->Segment = DmarDrhd->SegmentNumber;
+ mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceDataMaxNumber = MAX_VTD_PCI_DATA_NUMBER;
+
+ if ((DmarDrhd->Flags & EFI_ACPI_DMAR_DRHD_FLAGS_INCLUDE_PCI_ALL) != 0) {
+ // INCLUDE_PCI_ALL: this engine covers every device on the segment,
+ // so register them all by scanning every bus.
+ mVtdUnitInformation[VtdIndex].PciDeviceInfo->IncludeAllFlag = TRUE;
+ DEBUG ((DEBUG_INFO," ProcessDrhd: with INCLUDE ALL\n"));
+
+ Status = ScanAllPciBus((VOID *)VtdIndex, DmarDrhd->SegmentNumber, ScanBusCallbackRegisterPciDevice);
+ if (EFI_ERROR (Status)) {
+ return Status;
+ }
+ } else {
+ mVtdUnitInformation[VtdIndex].PciDeviceInfo->IncludeAllFlag = FALSE;
+ DEBUG ((DEBUG_INFO," ProcessDrhd: without INCLUDE ALL\n"));
+ }
+
+ //
+ // Register each device named by the device scope entries that follow
+ // the DRHD header (present even with INCLUDE_PCI_ALL, e.g. for IOAPIC
+ // and HPET entries).
+ //
+ DmarDevScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)((UINTN)(DmarDrhd + 1));
+ while ((UINTN)DmarDevScopeEntry < (UINTN)DmarDrhd + DmarDrhd->Header.Length) {
+
+ Status = GetPciBusDeviceFunction (DmarDrhd->SegmentNumber, DmarDevScopeEntry, &Bus, &Device, &Function);
+ if (EFI_ERROR (Status)) {
+ return Status;
+ }
+
+ DEBUG ((DEBUG_INFO," ProcessDrhd: "));
+ switch (DmarDevScopeEntry->Type) {
+ case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_ENDPOINT:
+ DEBUG ((DEBUG_INFO,"PCI Endpoint"));
+ break;
+ case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_BRIDGE:
+ DEBUG ((DEBUG_INFO,"PCI-PCI bridge"));
+ break;
+ case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_IOAPIC:
+ DEBUG ((DEBUG_INFO,"IOAPIC"));
+ break;
+ case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_MSI_CAPABLE_HPET:
+ DEBUG ((DEBUG_INFO,"MSI Capable HPET"));
+ break;
+ case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_ACPI_NAMESPACE_DEVICE:
+ DEBUG ((DEBUG_INFO,"ACPI Namespace Device"));
+ break;
+ }
+ DEBUG ((DEBUG_INFO," S%04x B%02x D%02x F%02x\n", DmarDrhd->SegmentNumber, Bus, Device, Function));
+
+ SourceId.Bits.Bus = Bus;
+ SourceId.Bits.Device = Device;
+ SourceId.Bits.Function = Function;
+
+ Status = RegisterPciDevice (VtdIndex, DmarDrhd->SegmentNumber, SourceId, DmarDevScopeEntry->Type, TRUE);
+ if (EFI_ERROR (Status)) {
+ //
+ // There might be duplication for special device other than standard PCI device.
+ // Only a duplicate standard PCI endpoint/bridge is treated as fatal.
+ //
+ switch (DmarDevScopeEntry->Type) {
+ case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_ENDPOINT:
+ case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_BRIDGE:
+ return Status;
+ }
+ }
+
+ switch (DmarDevScopeEntry->Type) {
+ case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_BRIDGE:
+ // A bridge entry covers everything behind it: register all devices
+ // reachable through the bridge's secondary bus.
+ SecondaryBusNumber = PciSegmentRead8 (PCI_SEGMENT_LIB_ADDRESS(DmarDrhd->SegmentNumber, Bus, Device, Function, PCI_BRIDGE_SECONDARY_BUS_REGISTER_OFFSET));
+ Status = ScanPciBus ((VOID *)VtdIndex, DmarDrhd->SegmentNumber, SecondaryBusNumber, ScanBusCallbackRegisterPciDevice);
+ if (EFI_ERROR (Status)) {
+ return Status;
+ }
+ break;
+ default:
+ break;
+ }
+
+ DmarDevScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)((UINTN)DmarDevScopeEntry + DmarDevScopeEntry->Length);
+ }
+
+ return EFI_SUCCESS;
+}
+
<imports>
</imports>
+/**
+  Process DMAR RMRR table.
+
+  For every PCI endpoint listed in the RMRR device scope, grants
+  read/write DMA access to the reserved memory region described by the
+  RMRR header, so ongoing firmware-initiated DMA (e.g. USB legacy
+  support) keeps working once translation is enabled.
+
+  @param[in] DmarRmrr The RMRR table.
+
+  @retval EFI_SUCCESS The RMRR table is processed.
+  @retval EFI_DEVICE_ERROR A device scope entry is not a PCI endpoint.
+**/
+EFI_STATUS
+ProcessRmrr (
+ IN EFI_ACPI_DMAR_RMRR_HEADER *DmarRmrr
+ )
+{
+ EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *DmarDevScopeEntry;
+ UINT8 Bus;
+ UINT8 Device;
+ UINT8 Function;
+ EFI_STATUS Status;
+ VTD_SOURCE_ID SourceId;
+
+ DEBUG ((DEBUG_INFO," RMRR (Base 0x%016lx, Limit 0x%016lx)\n", DmarRmrr->ReservedMemoryRegionBaseAddress, DmarRmrr->ReservedMemoryRegionLimitAddress));
+
+ DmarDevScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)((UINTN)(DmarRmrr + 1));
+ while ((UINTN)DmarDevScopeEntry < (UINTN)DmarRmrr + DmarRmrr->Header.Length) {
+ // Only PCI endpoints are supported in an RMRR device scope here.
+ if (DmarDevScopeEntry->Type != EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_ENDPOINT) {
+ DEBUG ((DEBUG_INFO,"RMRR DevScopeEntryType is not endpoint, type[0x%x] \n", DmarDevScopeEntry->Type));
+ return EFI_DEVICE_ERROR;
+ }
+
+ Status = GetPciBusDeviceFunction (DmarRmrr->SegmentNumber, DmarDevScopeEntry, &Bus, &Device, &Function);
+ if (EFI_ERROR (Status)) {
+ return Status;
+ }
+
+ DEBUG ((DEBUG_INFO,"RMRR S%04x B%02x D%02x F%02x\n", DmarRmrr->SegmentNumber, Bus, Device, Function));
+
+ SourceId.Bits.Bus = Bus;
+ SourceId.Bits.Device = Device;
+ SourceId.Bits.Function = Function;
+ // Limit is inclusive, so region size is Limit + 1 - Base.
+ Status = SetAccessAttribute (
+ DmarRmrr->SegmentNumber,
+ SourceId,
+ DmarRmrr->ReservedMemoryRegionBaseAddress,
+ DmarRmrr->ReservedMemoryRegionLimitAddress + 1 - DmarRmrr->ReservedMemoryRegionBaseAddress,
+ EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE
+ );
+ if (EFI_ERROR (Status)) {
+ return Status;
+ }
+
+ DmarDevScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)((UINTN)DmarDevScopeEntry + DmarDevScopeEntry->Length);
+ }
+
+ return EFI_SUCCESS;
+}
+
+/**
+  Get VTd engine number.
+
+  Counts the DRHD structures in the cached DMAR table; each DRHD
+  describes one VTd remapping engine.
+
+  @return The number of DRHD structures found in mAcpiDmarTable.
+**/
+UINTN
+GetVtdEngineNumber (
+ VOID
+ )
+{
+ EFI_ACPI_DMAR_STRUCTURE_HEADER *DmarHeader;
+ UINTN VtdIndex;
+
+ VtdIndex = 0;
+ DmarHeader = (EFI_ACPI_DMAR_STRUCTURE_HEADER *)((UINTN)(mAcpiDmarTable + 1));
+ while ((UINTN)DmarHeader < (UINTN)mAcpiDmarTable + mAcpiDmarTable->Header.Length) {
+ switch (DmarHeader->Type) {
+ case EFI_ACPI_DMAR_TYPE_DRHD:
+ VtdIndex++;
+ break;
+ default:
+ break;
+ }
+ DmarHeader = (EFI_ACPI_DMAR_STRUCTURE_HEADER *)((UINTN)DmarHeader + DmarHeader->Length);
+ }
+ return VtdIndex ;
+}
+
+/**
+  Parse DMAR DRHD table.
+
+  Allocates mVtdUnitInformation sized by the number of DRHD structures,
+  then processes each DRHD via ProcessDrhd() and logs the resulting PCI
+  device lists.
+
+  @return EFI_SUCCESS The DMAR DRHD table is parsed.
+  @retval EFI_DEVICE_ERROR No DRHD structure was found in the DMAR table.
+  @retval EFI_OUT_OF_RESOURCES The VTd unit information array could not be allocated.
+**/
+EFI_STATUS
+ParseDmarAcpiTableDrhd (
+ VOID
+ )
+{
+ EFI_ACPI_DMAR_STRUCTURE_HEADER *DmarHeader;
+ EFI_STATUS Status;
+ UINTN VtdIndex;
+
+ mVtdUnitNumber = GetVtdEngineNumber ();
+ DEBUG ((DEBUG_INFO," VtdUnitNumber - %d\n", mVtdUnitNumber));
+ ASSERT (mVtdUnitNumber > 0);
+ if (mVtdUnitNumber == 0) {
+ return EFI_DEVICE_ERROR;
+ }
+
+ mVtdUnitInformation = AllocateZeroPool (sizeof(*mVtdUnitInformation) * mVtdUnitNumber);
+ ASSERT (mVtdUnitInformation != NULL);
+ if (mVtdUnitInformation == NULL) {
+ return EFI_OUT_OF_RESOURCES;
+ }
+
+ // Second pass: populate one mVtdUnitInformation slot per DRHD.
+ VtdIndex = 0;
+ DmarHeader = (EFI_ACPI_DMAR_STRUCTURE_HEADER *)((UINTN)(mAcpiDmarTable + 1));
+ while ((UINTN)DmarHeader < (UINTN)mAcpiDmarTable + mAcpiDmarTable->Header.Length) {
+ switch (DmarHeader->Type) {
+ case EFI_ACPI_DMAR_TYPE_DRHD:
+ ASSERT (VtdIndex < mVtdUnitNumber);
+ Status = ProcessDrhd (VtdIndex, (EFI_ACPI_DMAR_DRHD_HEADER *)DmarHeader);
+ if (EFI_ERROR (Status)) {
+ return Status;
+ }
+ VtdIndex++;
+
+ break;
+
+ default:
+ break;
+ }
+ DmarHeader = (EFI_ACPI_DMAR_STRUCTURE_HEADER *)((UINTN)DmarHeader + DmarHeader->Length);
+ }
+ ASSERT (VtdIndex == mVtdUnitNumber);
+
+ // Dump and log the registered PCI devices of every engine.
+ for (VtdIndex = 0; VtdIndex < mVtdUnitNumber; VtdIndex++) {
+ VtdLibDumpPciDeviceInfo (NULL, NULL, mVtdUnitInformation[VtdIndex].PciDeviceInfo);
+
+ VTdLogAddDataEvent (VTDLOG_DXE_PCI_DEVICE,
+ mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress,
+ mVtdUnitInformation[VtdIndex].PciDeviceInfo,
+ sizeof (PCI_DEVICE_INFORMATION) + sizeof (PCI_DEVICE_DATA) * mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceDataNumber);
+ }
+ return EFI_SUCCESS ;
+}
+
+/**
+  Parse DMAR RMRR table.
+
+  Iterates the DMAR table and processes every RMRR structure via
+  ProcessRmrr(). (Comment previously said "DRHD" - this function handles
+  RMRR structures only.)
+
+  @return EFI_SUCCESS The DMAR RMRR table is parsed.
+**/
+EFI_STATUS
+ParseDmarAcpiTableRmrr (
+ VOID
+ )
+{
+ EFI_ACPI_DMAR_STRUCTURE_HEADER *DmarHeader;
+ EFI_STATUS Status;
+
+ DmarHeader = (EFI_ACPI_DMAR_STRUCTURE_HEADER *)((UINTN)(mAcpiDmarTable + 1));
+ while ((UINTN)DmarHeader < (UINTN)mAcpiDmarTable + mAcpiDmarTable->Header.Length) {
+ switch (DmarHeader->Type) {
+ case EFI_ACPI_DMAR_TYPE_RMRR:
+ Status = ProcessRmrr ((EFI_ACPI_DMAR_RMRR_HEADER *)DmarHeader);
+ if (EFI_ERROR (Status)) {
+ return Status;
+ }
+ break;
+ default:
+ break;
+ }
+ DmarHeader = (EFI_ACPI_DMAR_STRUCTURE_HEADER *)((UINTN)DmarHeader + DmarHeader->Length);
+ }
+ return EFI_SUCCESS ;
+}
+
+/**
+  Get the DMAR ACPI table.
+
+  Locates the DMAR table from the installed ACPI tables and caches it in
+  mAcpiDmarTable; subsequent calls return EFI_ALREADY_STARTED without
+  searching again.
+
+  @retval EFI_SUCCESS The DMAR ACPI table is got.
+  @retval EFI_ALREADY_STARTED The DMAR ACPI table has been got previously.
+  @retval EFI_NOT_FOUND The DMAR ACPI table is not found.
+**/
+EFI_STATUS
+GetDmarAcpiTable (
+ VOID
+ )
+{
+ if (mAcpiDmarTable != NULL) {
+ return EFI_ALREADY_STARTED;
+ }
+
+ mAcpiDmarTable = (EFI_ACPI_DMAR_HEADER *) EfiLocateFirstAcpiTable (
+ EFI_ACPI_4_0_DMA_REMAPPING_TABLE_SIGNATURE
+ );
+ if (mAcpiDmarTable == NULL) {
+ return EFI_NOT_FOUND;
+ }
+ DEBUG ((DEBUG_INFO,"DMAR Table - 0x%08x\n", mAcpiDmarTable));
+ VtdDumpDmarTable();
+
+ return EFI_SUCCESS;
+}
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.c
new file mode 100644
index 000000000..8449b2885
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.c
@@ -0,0 +1,412 @@
+/** @file
+ Intel VTd driver.
+
+ Copyright (c) 2017 - 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "DmaProtection.h"
+
+/**
+ Provides the controller-specific addresses required to access system memory from a
+ DMA bus master.
+
+ @param This The protocol instance pointer.
+ @param Operation Indicates if the bus master is going to read or write to system memory.
+ @param HostAddress The system memory address to map to the PCI controller.
+ @param NumberOfBytes On input the number of bytes to map. On output the number of bytes
+ that were mapped.
+ @param DeviceAddress The resulting map address for the bus master PCI controller to use to
+ access the hosts HostAddress.
+ @param Mapping A resulting value to pass to Unmap().
+
+ @retval EFI_SUCCESS The range was mapped for the returned NumberOfBytes.
+ @retval EFI_UNSUPPORTED The HostAddress cannot be mapped as a common buffer.
+ @retval EFI_INVALID_PARAMETER One or more parameters are invalid.
+ @retval EFI_OUT_OF_RESOURCES The request could not be completed due to a lack of resources.
+ @retval EFI_DEVICE_ERROR The system hardware could not map the requested address.
+
+**/
+EFI_STATUS
+EFIAPI
+IoMmuMap (
+ IN EDKII_IOMMU_PROTOCOL *This,
+ IN EDKII_IOMMU_OPERATION Operation,
+ IN VOID *HostAddress,
+ IN OUT UINTN *NumberOfBytes,
+ OUT EFI_PHYSICAL_ADDRESS *DeviceAddress,
+ OUT VOID **Mapping
+ );
+
+/**
+ Completes the Map() operation and releases any corresponding resources.
+
+ @param This The protocol instance pointer.
+ @param Mapping The mapping value returned from Map().
+
+ @retval EFI_SUCCESS The range was unmapped.
+ @retval EFI_INVALID_PARAMETER Mapping is not a value that was returned by Map().
+ @retval EFI_DEVICE_ERROR The data was not committed to the target system memory.
+**/
+EFI_STATUS
+EFIAPI
+IoMmuUnmap (
+ IN EDKII_IOMMU_PROTOCOL *This,
+ IN VOID *Mapping
+ );
+
+/**
+ Allocates pages that are suitable for an OperationBusMasterCommonBuffer or
+ OperationBusMasterCommonBuffer64 mapping.
+
+ @param This The protocol instance pointer.
+ @param Type This parameter is not used and must be ignored.
+ @param MemoryType The type of memory to allocate, EfiBootServicesData or
+ EfiRuntimeServicesData.
+ @param Pages The number of pages to allocate.
+ @param HostAddress A pointer to store the base system memory address of the
+ allocated range.
+ @param Attributes The requested bit mask of attributes for the allocated range.
+
+ @retval EFI_SUCCESS The requested memory pages were allocated.
+ @retval EFI_UNSUPPORTED Attributes is unsupported. The only legal attribute bits are
+ MEMORY_WRITE_COMBINE, MEMORY_CACHED and DUAL_ADDRESS_CYCLE.
+ @retval EFI_INVALID_PARAMETER One or more parameters are invalid.
+ @retval EFI_OUT_OF_RESOURCES The memory pages could not be allocated.
+
+**/
+EFI_STATUS
+EFIAPI
+IoMmuAllocateBuffer (
+ IN EDKII_IOMMU_PROTOCOL *This,
+ IN EFI_ALLOCATE_TYPE Type,
+ IN EFI_MEMORY_TYPE MemoryType,
+ IN UINTN Pages,
+ IN OUT VOID **HostAddress,
+ IN UINT64 Attributes
+ );
+
+/**
+ Frees memory that was allocated with AllocateBuffer().
+
+ @param This The protocol instance pointer.
+ @param Pages The number of pages to free.
+ @param HostAddress The base system memory address of the allocated range.
+
+ @retval EFI_SUCCESS The requested memory pages were freed.
+ @retval EFI_INVALID_PARAMETER The memory range specified by HostAddress and Pages
+ was not allocated with AllocateBuffer().
+
+**/
+EFI_STATUS
+EFIAPI
+IoMmuFreeBuffer (
+ IN EDKII_IOMMU_PROTOCOL *This,
+ IN UINTN Pages,
+ IN VOID *HostAddress
+ );
+
+/**
+ This function fills DeviceHandle/IoMmuAccess to the MAP_HANDLE_INFO,
+ based upon the DeviceAddress.
+
+ @param[in] DeviceHandle The device who initiates the DMA access request.
+ @param[in] DeviceAddress The base of device memory address to be used as the DMA memory.
+ @param[in] Length The length of device memory address to be used as the DMA memory.
+ @param[in] IoMmuAccess The IOMMU access.
+
+**/
+VOID
+SyncDeviceHandleToMapInfo (
+ IN EFI_HANDLE DeviceHandle,
+ IN EFI_PHYSICAL_ADDRESS DeviceAddress,
+ IN UINT64 Length,
+ IN UINT64 IoMmuAccess
+ );
+
+/**
+  Convert the DeviceHandle to SourceId and Segment.
+
+  The platform VTd policy protocol (when present) is consulted first so
+  the platform can map non-PCI or special handles; otherwise the handle's
+  EFI_PCI_IO_PROTOCOL location is used.
+
+  @param[in] DeviceHandle The device who initiates the DMA access request.
+  @param[out] Segment The Segment used to identify a VTd engine.
+  @param[out] SourceId The SourceId used to identify a VTd engine and table entry.
+
+  @retval EFI_SUCCESS The Segment and SourceId are returned.
+  @retval EFI_INVALID_PARAMETER DeviceHandle is an invalid handle.
+  @retval EFI_UNSUPPORTED DeviceHandle is unknown by the IOMMU.
+**/
+EFI_STATUS
+DeviceHandleToSourceId (
+ IN EFI_HANDLE DeviceHandle,
+ OUT UINT16 *Segment,
+ OUT VTD_SOURCE_ID *SourceId
+ )
+{
+ EFI_PCI_IO_PROTOCOL *PciIo;
+ UINTN Seg;
+ UINTN Bus;
+ UINTN Dev;
+ UINTN Func;
+ EFI_STATUS Status;
+ EDKII_PLATFORM_VTD_DEVICE_INFO DeviceInfo;
+
+ // Prefer the platform policy mapping when it knows this handle.
+ Status = EFI_NOT_FOUND;
+ if (mPlatformVTdPolicy != NULL) {
+ Status = mPlatformVTdPolicy->GetDeviceId (mPlatformVTdPolicy, DeviceHandle, &DeviceInfo);
+ if (!EFI_ERROR(Status)) {
+ *Segment = DeviceInfo.Segment;
+ *SourceId = DeviceInfo.SourceId;
+ return EFI_SUCCESS;
+ }
+ }
+
+ // Fall back to the standard PCI location of the handle.
+ Status = gBS->HandleProtocol (DeviceHandle, &gEfiPciIoProtocolGuid, (VOID **)&PciIo);
+ if (EFI_ERROR(Status)) {
+ return EFI_UNSUPPORTED;
+ }
+ Status = PciIo->GetLocation (PciIo, &Seg, &Bus, &Dev, &Func);
+ if (EFI_ERROR(Status)) {
+ return EFI_UNSUPPORTED;
+ }
+ *Segment = (UINT16)Seg;
+ SourceId->Bits.Bus = (UINT8)Bus;
+ SourceId->Bits.Device = (UINT8)Dev;
+ SourceId->Bits.Function = (UINT8)Func;
+
+ return EFI_SUCCESS;
+}
+
+/**
+  Set IOMMU attribute for a system memory.
+
+  If the IOMMU protocol exists, the system memory cannot be used
+  for DMA by default.
+
+  When a device requests a DMA access for a system memory,
+  the device driver need use SetAttribute() to update the IOMMU
+  attribute to request DMA access (read and/or write).
+
+  The DeviceHandle is used to identify which device submits the request.
+  The IOMMU implementation need translate the device path to an IOMMU device ID,
+  and set IOMMU hardware register accordingly.
+  1) DeviceHandle can be a standard PCI device.
+     The memory for BusMasterRead need set EDKII_IOMMU_ACCESS_READ.
+     The memory for BusMasterWrite need set EDKII_IOMMU_ACCESS_WRITE.
+     The memory for BusMasterCommonBuffer need set EDKII_IOMMU_ACCESS_READ|EDKII_IOMMU_ACCESS_WRITE.
+     After the memory is used, the memory need set 0 to keep it being protected.
+  2) DeviceHandle can be an ACPI device (ISA, I2C, SPI, etc).
+     The memory for DMA access need set EDKII_IOMMU_ACCESS_READ and/or EDKII_IOMMU_ACCESS_WRITE.
+
+  @param[in]  This              The protocol instance pointer.
+  @param[in]  DeviceHandle      The device who initiates the DMA access request.
+  @param[in]  DeviceAddress     The base of device memory address to be used as the DMA memory.
+  @param[in]  Length            The length of device memory address to be used as the DMA memory.
+  @param[in]  IoMmuAccess       The IOMMU access.
+
+  @retval EFI_SUCCESS            The IoMmuAccess is set for the memory range specified by DeviceAddress and Length.
+  @retval EFI_NOT_READY          DMAR table is not yet installed and the platform policy forbids deferred requests.
+  @retval EFI_INVALID_PARAMETER  DeviceHandle is an invalid handle.
+  @retval EFI_INVALID_PARAMETER  DeviceAddress is not IoMmu Page size aligned.
+  @retval EFI_INVALID_PARAMETER  Length is not IoMmu Page size aligned.
+  @retval EFI_INVALID_PARAMETER  Length is 0.
+  @retval EFI_INVALID_PARAMETER  IoMmuAccess specified an illegal combination of access.
+  @retval EFI_UNSUPPORTED        DeviceHandle is unknown by the IOMMU.
+  @retval EFI_UNSUPPORTED        The bit mask of IoMmuAccess is not supported by the IOMMU.
+  @retval EFI_UNSUPPORTED        The IOMMU does not support the memory range specified by DeviceAddress and Length.
+  @retval EFI_OUT_OF_RESOURCES   There are not enough resources available to modify the IOMMU access.
+  @retval EFI_DEVICE_ERROR       The IOMMU device reported an error while attempting the operation.
+
+**/
+EFI_STATUS
+VTdSetAttribute (
+ IN EDKII_IOMMU_PROTOCOL *This,
+ IN EFI_HANDLE DeviceHandle,
+ IN EFI_PHYSICAL_ADDRESS DeviceAddress,
+ IN UINT64 Length,
+ IN UINT64 IoMmuAccess
+ )
+{
+ EFI_STATUS Status;
+ UINT16 Segment;
+ VTD_SOURCE_ID SourceId;
+ CHAR8 PerfToken[sizeof("VTD(S0000.B00.D00.F00)")];
+ UINT32 Identifier;
+ VTD_PROTOCOL_SET_ATTRIBUTE LogSetAttribute;
+
+ // Dump diagnostic state first if a VTd error was recorded earlier.
+ DumpVtdIfError ();
+
+ Status = DeviceHandleToSourceId (DeviceHandle, &Segment, &SourceId);
+ if (EFI_ERROR(Status)) {
+ return Status;
+ }
+
+ DEBUG ((DEBUG_VERBOSE, "IoMmuSetAttribute: "));
+ DEBUG ((DEBUG_VERBOSE, "PCI(S%x.B%x.D%x.F%x) ", Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+ DEBUG ((DEBUG_VERBOSE, "(0x%lx~0x%lx) - %lx\n", DeviceAddress, Length, IoMmuAccess));
+
+ if (mAcpiDmarTable == NULL) {
+ //
+ // Record the entry to driver global variable.
+ // As such once VTd is activated, the setting can be adopted.
+ //
+ if ((PcdGet8 (PcdVTdPolicyPropertyMask) & BIT2) != 0) {
+ //
+ // Force no IOMMU access attribute request recording before DMAR table is installed.
+ // ASSERT_EFI_ERROR on an error status fires deliberately on DEBUG builds.
+ //
+ ASSERT_EFI_ERROR (EFI_NOT_READY);
+ return EFI_NOT_READY;
+ }
+ Status = RequestAccessAttribute (Segment, SourceId, DeviceAddress, Length, IoMmuAccess);
+ } else {
+ // VTd is active: apply the attribute now, bracketed by performance
+ // measurement when PERF instrumentation is enabled.
+ PERF_CODE (
+ AsciiSPrint (PerfToken, sizeof(PerfToken), "S%04xB%02xD%02xF%01x", Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function);
+ Identifier = (Segment << 16) | SourceId.Uint16;
+ PERF_START_EX (gImageHandle, PerfToken, "IntelVTD", 0, Identifier);
+ );
+
+ Status = SetAccessAttribute (Segment, SourceId, DeviceAddress, Length, IoMmuAccess);
+
+ PERF_CODE (
+ Identifier = (Segment << 16) | SourceId.Uint16;
+ PERF_END_EX (gImageHandle, PerfToken, "IntelVTD", 0, Identifier);
+ );
+ }
+
+ if (!EFI_ERROR(Status)) {
+ // Remember DeviceHandle/IoMmuAccess for the mapping so Unmap() can
+ // restore protection later.
+ SyncDeviceHandleToMapInfo (
+ DeviceHandle,
+ DeviceAddress,
+ Length,
+ IoMmuAccess
+ );
+ }
+
+ // Log the request and its outcome regardless of success or failure.
+ LogSetAttribute.SourceId.Uint16 = SourceId.Uint16;
+ LogSetAttribute.DeviceAddress = DeviceAddress;
+ LogSetAttribute.Length = Length;
+ LogSetAttribute.IoMmuAccess = IoMmuAccess;
+ LogSetAttribute.Status = Status;
+ VTdLogAddDataEvent (VTDLOG_DXE_IOMMU_SET_ATTRIBUTE, 0, &LogSetAttribute, sizeof (VTD_PROTOCOL_SET_ATTRIBUTE));
+
+ return Status;
+}
+
+/**
+  Set IOMMU attribute for a system memory.
+
+  If the IOMMU protocol exists, the system memory cannot be used
+  for DMA by default.
+
+  When a device requests a DMA access for a system memory,
+  the device driver need use SetAttribute() to update the IOMMU
+  attribute to request DMA access (read and/or write).
+
+  The DeviceHandle is used to identify which device submits the request.
+  The IOMMU implementation need translate the device path to an IOMMU device ID,
+  and set IOMMU hardware register accordingly.
+  1) DeviceHandle can be a standard PCI device.
+     The memory for BusMasterRead need set EDKII_IOMMU_ACCESS_READ.
+     The memory for BusMasterWrite need set EDKII_IOMMU_ACCESS_WRITE.
+     The memory for BusMasterCommonBuffer need set EDKII_IOMMU_ACCESS_READ|EDKII_IOMMU_ACCESS_WRITE.
+     After the memory is used, the memory need set 0 to keep it being protected.
+  2) DeviceHandle can be an ACPI device (ISA, I2C, SPI, etc).
+     The memory for DMA access need set EDKII_IOMMU_ACCESS_READ and/or EDKII_IOMMU_ACCESS_WRITE.
+
+  This is the EDKII_IOMMU_PROTOCOL.SetAttribute entry point: it resolves
+  the Map() cookie to a device address range and delegates to
+  VTdSetAttribute().
+
+  @param[in]  This              The protocol instance pointer.
+  @param[in]  DeviceHandle      The device who initiates the DMA access request.
+  @param[in]  Mapping           The mapping value returned from Map().
+  @param[in]  IoMmuAccess       The IOMMU access.
+
+  @retval EFI_SUCCESS            The IoMmuAccess is set for the memory range specified by DeviceAddress and Length.
+  @retval EFI_INVALID_PARAMETER  DeviceHandle is an invalid handle.
+  @retval EFI_INVALID_PARAMETER  Mapping is not a value that was returned by Map().
+  @retval EFI_INVALID_PARAMETER  IoMmuAccess specified an illegal combination of access.
+  @retval EFI_UNSUPPORTED        DeviceHandle is unknown by the IOMMU.
+  @retval EFI_UNSUPPORTED        The bit mask of IoMmuAccess is not supported by the IOMMU.
+  @retval EFI_UNSUPPORTED        The IOMMU does not support the memory range specified by Mapping.
+  @retval EFI_OUT_OF_RESOURCES   There are not enough resources available to modify the IOMMU access.
+  @retval EFI_DEVICE_ERROR       The IOMMU device reported an error while attempting the operation.
+
+**/
+EFI_STATUS
+EFIAPI
+IoMmuSetAttribute (
+ IN EDKII_IOMMU_PROTOCOL *This,
+ IN EFI_HANDLE DeviceHandle,
+ IN VOID *Mapping,
+ IN UINT64 IoMmuAccess
+ )
+{
+ EFI_STATUS Status;
+ EFI_PHYSICAL_ADDRESS DeviceAddress;
+ UINTN NumberOfPages;
+ EFI_TPL OriginalTpl;
+
+ // Raise the TPL so the mapping lookup and the attribute update are not
+ // interleaved with other Map/Unmap/SetAttribute callers.
+ OriginalTpl = gBS->RaiseTPL (VTD_TPL_LEVEL);
+
+ Status = GetDeviceInfoFromMapping (Mapping, &DeviceAddress, &NumberOfPages);
+ if (!EFI_ERROR(Status)) {
+ Status = VTdSetAttribute (
+ This,
+ DeviceHandle,
+ DeviceAddress,
+ EFI_PAGES_TO_SIZE(NumberOfPages),
+ IoMmuAccess
+ );
+ }
+
+ gBS->RestoreTPL (OriginalTpl);
+
+ return Status;
+}
+
+//
+// EDKII_IOMMU_PROTOCOL instance produced by this driver; the Map/Unmap/
+// Allocate/Free members are implemented in BmDma.c.
+//
+EDKII_IOMMU_PROTOCOL mIntelVTd = {
+ EDKII_IOMMU_PROTOCOL_REVISION,
+ IoMmuSetAttribute,
+ IoMmuMap,
+ IoMmuUnmap,
+ IoMmuAllocateBuffer,
+ IoMmuFreeBuffer,
+};
+
+/**
+  Initialize the VTd driver.
+
+  Does nothing unless BIT0 of PcdVTdPolicyPropertyMask is set. Otherwise
+  initializes the VTd event log, sets up DMA protection, and installs the
+  EDKII IOMMU protocol on a new handle.
+
+  @param[in]  ImageHandle  ImageHandle of the loaded driver
+  @param[in]  SystemTable  Pointer to the System Table
+
+  @retval EFI_SUCCESS          The Protocol is installed.
+  @retval EFI_UNSUPPORTED      The feature is disabled by platform policy (BIT0 clear).
+  @retval EFI_OUT_OF_RESOURCES Not enough resources available to initialize driver.
+  @retval EFI_DEVICE_ERROR     A device error occurred attempting to initialize the driver.
+
+**/
+EFI_STATUS
+EFIAPI
+IntelVTdInitialize (
+ IN EFI_HANDLE ImageHandle,
+ IN EFI_SYSTEM_TABLE *SystemTable
+ )
+{
+ EFI_STATUS Status;
+ EFI_HANDLE Handle;
+
+ if ((PcdGet8(PcdVTdPolicyPropertyMask) & BIT0) == 0) {
+ return EFI_UNSUPPORTED;
+ }
+
+ VTdLogInitialize ();
+
+ InitializeDmaProtection ();
+
+ Handle = NULL;
+ Status = gBS->InstallMultipleProtocolInterfaces (
+ &Handle,
+ &gEdkiiIoMmuProtocolGuid, &mIntelVTd,
+ NULL
+ );
+ ASSERT_EFI_ERROR (Status);
+
+ // Record protocol installation (and its status) in the VTd event log.
+ VTdLogAddEvent (VTDLOG_DXE_INSTALL_IOMMU_PROTOCOL, Status, 0);
+
+ return Status;
+}
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.inf b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.inf
new file mode 100644
index 000000000..210d6963f
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.inf
@@ -0,0 +1,93 @@
+## @file
+# Intel VTd DXE Driver.
+#
+# This driver initializes VTd engine based upon DMAR ACPI tables
+# and provides DMA protection to PCI or ACPI devices.
+#
+# Copyright (c) 2017 - 2023, Intel Corporation. All rights reserved.<BR>
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+##
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = IntelVTdCoreDxe
+ MODULE_UNI_FILE = IntelVTdCoreDxe.uni
+ FILE_GUID = 5c83381f-34d3-4672-b8f3-83c3d6f3b00e
+ MODULE_TYPE = DXE_DRIVER
+ VERSION_STRING = 1.0
+ ENTRY_POINT = IntelVTdInitialize
+
+#
+# The following information is for reference only and not required by the build tools.
+#
+# VALID_ARCHITECTURES = IA32 X64 EBC
+#
+#
+
+[Sources]
+ IntelVTdCoreDxe.c
+ BmDma.c
+ DmaProtection.c
+ DmaProtection.h
+ DmarAcpiTable.c
+ PciInfo.c
+ TranslationTable.c
+ TranslationTableEx.c
+ VtdLog.c
+ VtdReg.c
+
+[Packages]
+ MdePkg/MdePkg.dec
+ MdeModulePkg/MdeModulePkg.dec
+ IntelSiliconPkg/IntelSiliconPkg.dec
+
+[LibraryClasses]
+ DebugLib
+ UefiDriverEntryPoint
+ UefiBootServicesTableLib
+ BaseLib
+ IoLib
+ HobLib
+ PciSegmentLib
+ BaseMemoryLib
+ MemoryAllocationLib
+ UefiLib
+ CacheMaintenanceLib
+ PerformanceLib
+ PrintLib
+ ReportStatusCodeLib
+ IntelVTdPeiDxeLib
+
+[Guids]
+ gVTdLogBufferHobGuid ## CONSUMES
+ gEfiEventExitBootServicesGuid ## CONSUMES ## Event
+ ## CONSUMES ## SystemTable
+ ## CONSUMES ## Event
+ gEfiAcpi20TableGuid
+ ## CONSUMES ## SystemTable
+ ## CONSUMES ## Event
+ gEfiAcpi10TableGuid
+
+[Protocols]
+ gEdkiiIoMmuProtocolGuid ## PRODUCES
+ gEfiPciIoProtocolGuid ## CONSUMES
+ gEfiPciEnumerationCompleteProtocolGuid ## CONSUMES
+ gEdkiiPlatformVTdPolicyProtocolGuid ## SOMETIMES_CONSUMES
+ gEfiPciRootBridgeIoProtocolGuid ## CONSUMES
+ gEdkiiVTdLogProtocolGuid ## PRODUCES
+
+[Pcd]
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdPolicyPropertyMask ## CONSUMES
+ gIntelSiliconPkgTokenSpaceGuid.PcdErrorCodeVTdError ## CONSUMES
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdSupportAbortDmaMode ## CONSUMES
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdLogLevel ## CONSUMES
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdPeiPostMemLogBufferSize ## CONSUMES
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdDxeLogBufferSize ## CONSUMES
+
+[Depex]
+ gEfiPciRootBridgeIoProtocolGuid
+
+[UserExtensions.TianoCore."ExtraFiles"]
+ IntelVTdCoreDxeExtra.uni
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.uni b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.uni
new file mode 100644
index 000000000..73d2c83c4
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxe.uni
@@ -0,0 +1,14 @@
+// /** @file
+// IntelVTdCoreDxe Module Localized Abstract and Description Content
+//
+// Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+//
+// **/
+
+
+#string STR_MODULE_ABSTRACT #language en-US "Intel VTd CORE DXE Driver."
+
+#string STR_MODULE_DESCRIPTION          #language en-US "This driver initializes the VTd engine based upon DMAR ACPI tables and provides DMA protection to PCI or ACPI devices."
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxeExtra.uni b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxeExtra.uni
new file mode 100644
index 000000000..7f1aec65e
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/IntelVTdCoreDxeExtra.uni
@@ -0,0 +1,14 @@
+// /** @file
+// IntelVTdCoreDxe Localized Strings and Content
+//
+// Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+//
+// **/
+
+#string STR_PROPERTIES_MODULE_NAME
+#language en-US
+"Intel VTd CORE DXE Driver"
+
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/PciInfo.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/PciInfo.c
new file mode 100644
index 000000000..0eb832d6e
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/PciInfo.c
@@ -0,0 +1,418 @@
+/** @file
+
+ Copyright (c) 2017 - 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "DmaProtection.h"
+
+/**
+  Return the index of PCI data.
+
+  Searches the PCI device list recorded for the given VTd engine for an
+  entry whose Bus/Device/Function matches SourceId.
+
+  @param[in]  VtdIndex  The index used to identify a VTd engine.
+  @param[in]  Segment   The Segment used to identify a VTd engine.
+  @param[in]  SourceId  The PCI source ID (Bus/Device/Function) to look up.
+
+  @return The index of the PCI data.
+  @retval (UINTN)-1 The PCI data is not found.
+**/
+UINTN
+GetPciDataIndex (
+  IN UINTN          VtdIndex,
+  IN UINT16         Segment,
+  IN VTD_SOURCE_ID  SourceId
+  )
+{
+  UINTN          Index;
+  VTD_SOURCE_ID  *PciSourceId;
+
+  //
+  // Each VTd unit records devices for exactly one PCI segment; a segment
+  // mismatch cannot match anything on this unit.
+  //
+  if (Segment != mVtdUnitInformation[VtdIndex].Segment) {
+    return (UINTN)-1;
+  }
+
+  for (Index = 0; Index < mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceDataNumber; Index++) {
+    PciSourceId = &mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceData[Index].PciSourceId;
+    if ((PciSourceId->Bits.Bus == SourceId.Bits.Bus) &&
+        (PciSourceId->Bits.Device == SourceId.Bits.Device) &&
+        (PciSourceId->Bits.Function == SourceId.Bits.Function) ) {
+      return Index;
+    }
+  }
+
+  return (UINTN)-1;
+}
+
+/**
+  Register PCI device to VTd engine.
+
+  Appends the device to the VTd unit's PciDeviceData list, growing the list
+  if needed. For endpoint/bridge scope types the PCI IDs are captured from
+  config space for logging.
+
+  @param[in]  VtdIndex    The index of VTd engine.
+  @param[in]  Segment     The segment of the source.
+  @param[in]  SourceId    The SourceId of the source.
+  @param[in]  DeviceType  The DMAR device scope type.
+  @param[in]  CheckExist  TRUE: ERROR will be returned if the PCI device is already registered.
+                          FALSE: SUCCESS will be returned if the PCI device is registered.
+
+  @retval EFI_SUCCESS           The PCI device is registered.
+  @retval EFI_OUT_OF_RESOURCES  Not enough resources to register a new PCI device.
+  @retval EFI_ALREADY_STARTED   The device is already registered.
+**/
+EFI_STATUS
+RegisterPciDevice (
+  IN UINTN          VtdIndex,
+  IN UINT16         Segment,
+  IN VTD_SOURCE_ID  SourceId,
+  IN UINT8          DeviceType,
+  IN BOOLEAN        CheckExist
+  )
+{
+  PCI_DEVICE_INFORMATION            *PciDeviceInfo;
+  VTD_SOURCE_ID                     *PciSourceId;
+  UINTN                             PciDataIndex;
+  UINTN                             Index;
+  PCI_DEVICE_INFORMATION            *NewPciDeviceInfo;
+  EDKII_PLATFORM_VTD_PCI_DEVICE_ID  *PciDeviceId;
+
+  PciDeviceInfo = mVtdUnitInformation[VtdIndex].PciDeviceInfo;
+
+  if (PciDeviceInfo->IncludeAllFlag) {
+    //
+    // Do not register device in other VTD Unit
+    //
+    for (Index = 0; Index < VtdIndex; Index++) {
+      PciDataIndex = GetPciDataIndex (Index, Segment, SourceId);
+      if (PciDataIndex != (UINTN)-1) {
+        DEBUG ((DEBUG_INFO, "  RegisterPciDevice: PCI S%04x B%02x D%02x F%02x already registered by Other Vtd(%d)\n", Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function, Index));
+        return EFI_SUCCESS;
+      }
+    }
+  }
+
+  PciDataIndex = GetPciDataIndex (VtdIndex, Segment, SourceId);
+  if (PciDataIndex == (UINTN)-1) {
+    //
+    // Register new
+    //
+
+    if (PciDeviceInfo->PciDeviceDataNumber >= PciDeviceInfo->PciDeviceDataMaxNumber) {
+      //
+      // List is full - reallocate with room for MAX_VTD_PCI_DATA_NUMBER
+      // additional entries.
+      //
+      NewPciDeviceInfo = AllocateZeroPool (sizeof (PCI_DEVICE_INFORMATION) + sizeof (PCI_DEVICE_DATA) * (PciDeviceInfo->PciDeviceDataMaxNumber + MAX_VTD_PCI_DATA_NUMBER));
+      if (NewPciDeviceInfo == NULL) {
+        return EFI_OUT_OF_RESOURCES;
+      }
+
+      //
+      // Copy only the OLD buffer's size. Copying the enlarged size here
+      // would read past the end of the original allocation.
+      //
+      CopyMem (NewPciDeviceInfo, PciDeviceInfo, sizeof (PCI_DEVICE_INFORMATION) + sizeof (PCI_DEVICE_DATA) * PciDeviceInfo->PciDeviceDataMaxNumber);
+      FreePool (PciDeviceInfo);
+
+      NewPciDeviceInfo->PciDeviceDataMaxNumber += MAX_VTD_PCI_DATA_NUMBER;
+      mVtdUnitInformation[VtdIndex].PciDeviceInfo = NewPciDeviceInfo;
+      PciDeviceInfo = NewPciDeviceInfo;
+    }
+
+    ASSERT (PciDeviceInfo->PciDeviceDataNumber < PciDeviceInfo->PciDeviceDataMaxNumber);
+
+    PciSourceId = &PciDeviceInfo->PciDeviceData[PciDeviceInfo->PciDeviceDataNumber].PciSourceId;
+    PciSourceId->Bits.Bus = SourceId.Bits.Bus;
+    PciSourceId->Bits.Device = SourceId.Bits.Device;
+    PciSourceId->Bits.Function = SourceId.Bits.Function;
+
+    DEBUG ((DEBUG_INFO, "  RegisterPciDevice: PCI S%04x B%02x D%02x F%02x", Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+
+    //
+    // For endpoint and P2P bridge scopes, capture the PCI IDs from config
+    // space; subsystem IDs exist only on endpoints (type 0 header).
+    //
+    PciDeviceId = &PciDeviceInfo->PciDeviceData[PciDeviceInfo->PciDeviceDataNumber].PciDeviceId;
+    if ((DeviceType == EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_ENDPOINT) ||
+        (DeviceType == EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_BRIDGE)) {
+      PciDeviceId->VendorId   = PciSegmentRead16 (PCI_SEGMENT_LIB_ADDRESS(Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function, PCI_VENDOR_ID_OFFSET));
+      PciDeviceId->DeviceId   = PciSegmentRead16 (PCI_SEGMENT_LIB_ADDRESS(Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function, PCI_DEVICE_ID_OFFSET));
+      PciDeviceId->RevisionId = PciSegmentRead8 (PCI_SEGMENT_LIB_ADDRESS(Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function, PCI_REVISION_ID_OFFSET));
+
+      DEBUG ((DEBUG_INFO, " (%04x:%04x:%02x", PciDeviceId->VendorId, PciDeviceId->DeviceId, PciDeviceId->RevisionId));
+
+      if (DeviceType == EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_ENDPOINT) {
+        PciDeviceId->SubsystemVendorId = PciSegmentRead16 (PCI_SEGMENT_LIB_ADDRESS(Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function, PCI_SUBSYSTEM_VENDOR_ID_OFFSET));
+        PciDeviceId->SubsystemDeviceId = PciSegmentRead16 (PCI_SEGMENT_LIB_ADDRESS(Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function, PCI_SUBSYSTEM_ID_OFFSET));
+        DEBUG ((DEBUG_INFO, ":%04x:%04x", PciDeviceId->SubsystemVendorId, PciDeviceId->SubsystemDeviceId));
+      }
+      DEBUG ((DEBUG_INFO, ")"));
+    }
+
+    PciDeviceInfo->PciDeviceData[PciDeviceInfo->PciDeviceDataNumber].DeviceType = DeviceType;
+
+    if ((DeviceType != EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_ENDPOINT) &&
+        (DeviceType != EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_BRIDGE)) {
+      DEBUG ((DEBUG_INFO, " (*)"));
+    }
+    DEBUG ((DEBUG_INFO, "\n"));
+
+    PciDeviceInfo->PciDeviceDataNumber++;
+  } else {
+    if (CheckExist) {
+      DEBUG ((DEBUG_INFO, "  RegisterPciDevice: PCI S%04x B%02x D%02x F%02x already registered\n", Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+      return EFI_ALREADY_STARTED;
+    }
+  }
+
+  return EFI_SUCCESS;
+}
+
<br>
+/**
+  The scan bus callback function to register PCI device.
+
+  Classifies the device (P2P bridge vs. endpoint) from its class code and
+  registers it on the VTd unit passed in Context.
+
+  @param[in]  Context   The context of the callback (the VTd unit index cast to VOID *).
+  @param[in]  Segment   The segment of the source.
+  @param[in]  Bus       The bus of the source.
+  @param[in]  Device    The device of the source.
+  @param[in]  Function  The function of the source.
+
+  @retval EFI_SUCCESS  The PCI device is registered.
+**/
+EFI_STATUS
+EFIAPI
+ScanBusCallbackRegisterPciDevice (
+  IN VOID    *Context,
+  IN UINT16  Segment,
+  IN UINT8   Bus,
+  IN UINT8   Device,
+  IN UINT8   Function
+  )
+{
+  VTD_SOURCE_ID  SourceId;
+  UINTN          VtdIndex;
+  UINT8          BaseClass;
+  UINT8          SubClass;
+  UINT8          DeviceType;
+  EFI_STATUS     Status;
+
+  VtdIndex = (UINTN)Context;
+  SourceId.Bits.Bus = Bus;
+  SourceId.Bits.Device = Device;
+  SourceId.Bits.Function = Function;
+
+  //
+  // Default to endpoint scope; only PCI-to-PCI bridges (base class Bridge,
+  // subclass P2P) are registered as bridge scope.
+  //
+  DeviceType = EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_ENDPOINT;
+  BaseClass = PciSegmentRead8 (PCI_SEGMENT_LIB_ADDRESS(Segment, Bus, Device, Function, PCI_CLASSCODE_OFFSET + 2));
+  if (BaseClass == PCI_CLASS_BRIDGE) {
+    SubClass = PciSegmentRead8 (PCI_SEGMENT_LIB_ADDRESS(Segment, Bus, Device, Function, PCI_CLASSCODE_OFFSET + 1));
+    if (SubClass == PCI_CLASS_BRIDGE_P2P) {
+      DeviceType = EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_BRIDGE;
+    }
+  }
+
+  //
+  // CheckExist is FALSE: re-registering an already known device succeeds.
+  //
+  Status = RegisterPciDevice (VtdIndex, Segment, SourceId, DeviceType, FALSE);
+  return Status;
+}
+
+/**
+  Scan PCI bus and invoke callback function for each PCI devices under the bus.
+
+  Recurses into the secondary bus of every PCI-to-PCI bridge found.
+
+  @param[in]  Context   The context of the callback function.
+  @param[in]  Segment   The segment of the source.
+  @param[in]  Bus       The bus of the source.
+  @param[in]  Callback  The callback function in PCI scan.
+
+  @retval EFI_SUCCESS  The PCI devices under the bus are scanned.
+**/
+EFI_STATUS
+ScanPciBus (
+  IN VOID                         *Context,
+  IN UINT16                       Segment,
+  IN UINT8                        Bus,
+  IN SCAN_BUS_FUNC_CALLBACK_FUNC  Callback
+  )
+{
+  UINT8       Device;
+  UINT8       Function;
+  UINT8       SecondaryBusNumber;
+  UINT8       HeaderType;
+  UINT8       BaseClass;
+  UINT8       SubClass;
+  UINT16      VendorID;
+  UINT16      DeviceID;
+  EFI_STATUS  Status;
+
+  // Scan the PCI bus for devices
+  for (Device = 0; Device <= PCI_MAX_DEVICE; Device++) {
+    for (Function = 0; Function <= PCI_MAX_FUNC; Function++) {
+      VendorID  = PciSegmentRead16 (PCI_SEGMENT_LIB_ADDRESS(Segment, Bus, Device, Function, PCI_VENDOR_ID_OFFSET));
+      DeviceID  = PciSegmentRead16 (PCI_SEGMENT_LIB_ADDRESS(Segment, Bus, Device, Function, PCI_DEVICE_ID_OFFSET));
+      //
+      // All-ones IDs mean nothing responded at this Bus/Device/Function.
+      //
+      if (VendorID == 0xFFFF && DeviceID == 0xFFFF) {
+        if (Function == 0) {
+          //
+          // If function 0 is not implemented, do not scan other functions.
+          //
+          break;
+        }
+        continue;
+      }
+
+      Status = Callback (Context, Segment, Bus, Device, Function);
+      if (EFI_ERROR (Status)) {
+        return Status;
+      }
+
+      //
+      // Recurse into the subordinate bus behind each PCI-to-PCI bridge.
+      //
+      BaseClass = PciSegmentRead8 (PCI_SEGMENT_LIB_ADDRESS(Segment, Bus, Device, Function, PCI_CLASSCODE_OFFSET + 2));
+      if (BaseClass == PCI_CLASS_BRIDGE) {
+        SubClass = PciSegmentRead8 (PCI_SEGMENT_LIB_ADDRESS(Segment, Bus, Device, Function, PCI_CLASSCODE_OFFSET + 1));
+        if (SubClass == PCI_CLASS_BRIDGE_P2P) {
+          SecondaryBusNumber = PciSegmentRead8 (PCI_SEGMENT_LIB_ADDRESS(Segment, Bus, Device, Function, PCI_BRIDGE_SECONDARY_BUS_REGISTER_OFFSET));
+          DEBUG ((DEBUG_INFO,"  ScanPciBus: PCI bridge S%04x B%02x D%02x F%02x (SecondBus:%02x)\n", Segment, Bus, Device, Function, SecondaryBusNumber));
+          //
+          // Secondary bus 0 means the bridge is not configured - skip it.
+          //
+          if (SecondaryBusNumber != 0) {
+            Status = ScanPciBus (Context, Segment, SecondaryBusNumber, Callback);
+            if (EFI_ERROR (Status)) {
+              return Status;
+            }
+          }
+        }
+      }
+
+      if (Function == 0) {
+        HeaderType = PciSegmentRead8 (PCI_SEGMENT_LIB_ADDRESS(Segment, Bus, Device, 0, PCI_HEADER_TYPE_OFFSET));
+        if ((HeaderType & HEADER_TYPE_MULTI_FUNCTION) == 0x00) {
+          //
+          // It is not a multi-function device, do not scan other functions.
+          //
+          break;
+        }
+      }
+    }
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Scan PCI bus and invoke callback function for each PCI devices under all root bus.
+
+  @param[in]  Context   The context of the callback function.
+  @param[in]  Segment   The segment of the source. (Unused: the segment is
+                        taken from each root bridge's SegmentNumber.)
+  @param[in]  Callback  The callback function in PCI scan.
+
+  @retval EFI_SUCCESS  The PCI devices under the bus are scanned.
+**/
+EFI_STATUS
+ScanAllPciBus (
+  IN VOID                         *Context,
+  IN UINT16                       Segment,
+  IN SCAN_BUS_FUNC_CALLBACK_FUNC  Callback
+  )
+{
+  EFI_STATUS                         Status;
+  UINTN                              Index;
+  UINTN                              HandleCount;
+  EFI_HANDLE                         *HandleBuffer;
+  EFI_PCI_ROOT_BRIDGE_IO_PROTOCOL    *PciRootBridgeIo;
+  EFI_ACPI_ADDRESS_SPACE_DESCRIPTOR  *Descriptors;
+
+  DEBUG ((DEBUG_INFO, "ScanAllPciBus ()\n"));
+
+  Status = gBS->LocateHandleBuffer (
+                  ByProtocol,
+                  &gEfiPciRootBridgeIoProtocolGuid,
+                  NULL,
+                  &HandleCount,
+                  &HandleBuffer
+                  );
+  ASSERT_EFI_ERROR (Status);
+  if (EFI_ERROR (Status)) {
+    //
+    // On failure HandleBuffer is not valid. Release builds do not stop on
+    // the ASSERT above, so bail out here instead of iterating over and
+    // freeing an uninitialized buffer.
+    //
+    return Status;
+  }
+
+  DEBUG ((DEBUG_INFO,"Find %d root bridges\n", HandleCount));
+
+  for (Index = 0; Index < HandleCount; Index++) {
+    Status = gBS->HandleProtocol (
+                    HandleBuffer[Index],
+                    &gEfiPciRootBridgeIoProtocolGuid,
+                    (VOID **) &PciRootBridgeIo
+                    );
+    ASSERT_EFI_ERROR (Status);
+
+    Status = PciRootBridgeIo->Configuration (PciRootBridgeIo, (VOID **) &Descriptors);
+    ASSERT_EFI_ERROR (Status);
+
+    //
+    // Find the bus number range descriptor for this root bridge.
+    //
+    while (Descriptors->Desc != ACPI_END_TAG_DESCRIPTOR) {
+      if (Descriptors->ResType == ACPI_ADDRESS_SPACE_TYPE_BUS) {
+        break;
+      }
+      Descriptors++;
+    }
+
+    if (Descriptors->Desc == ACPI_END_TAG_DESCRIPTOR) {
+      //
+      // No bus number resource reported - skip this root bridge.
+      //
+      continue;
+    }
+
+    DEBUG ((DEBUG_INFO,"Scan root bridges : %d, Segment : %d, Bus : 0x%02X\n", Index, PciRootBridgeIo->SegmentNumber, Descriptors->AddrRangeMin));
+    Status = ScanPciBus(Context, (UINT16) PciRootBridgeIo->SegmentNumber, (UINT8) Descriptors->AddrRangeMin, Callback);
+    if (EFI_ERROR (Status)) {
+      break;
+    }
+  }
+
+  FreePool(HandleBuffer);
+
+  return Status;
+}
+
+/**
+  Find the VTd index by the Segment and SourceId.
+
+  On success exactly one of ExtContextEntry / ContextEntry is returned
+  non-NULL, depending on whether the matching VTd unit uses the extended
+  root/context table format.
+
+  @param[in]   Segment          The segment of the source.
+  @param[in]   SourceId         The SourceId of the source.
+  @param[out]  ExtContextEntry  The ExtContextEntry of the source (NULL if the
+                                unit uses the legacy table format).
+  @param[out]  ContextEntry     The ContextEntry of the source (NULL if the
+                                unit uses the extended table format).
+
+  @return The index of the VTd engine.
+  @retval (UINTN)-1 The VTd engine is not found.
+**/
+UINTN
+FindVtdIndexByPciDevice (
+  IN  UINT16                 Segment,
+  IN  VTD_SOURCE_ID          SourceId,
+  OUT VTD_EXT_CONTEXT_ENTRY  **ExtContextEntry,
+  OUT VTD_CONTEXT_ENTRY      **ContextEntry
+  )
+{
+  UINTN                  VtdIndex;
+  VTD_ROOT_ENTRY         *RootEntry;
+  VTD_CONTEXT_ENTRY      *ContextEntryTable;
+  VTD_CONTEXT_ENTRY      *ThisContextEntry;
+  VTD_EXT_ROOT_ENTRY     *ExtRootEntry;
+  VTD_EXT_CONTEXT_ENTRY  *ExtContextEntryTable;
+  VTD_EXT_CONTEXT_ENTRY  *ThisExtContextEntry;
+  UINTN                  PciDataIndex;
+
+  for (VtdIndex = 0; VtdIndex < mVtdUnitNumber; VtdIndex++) {
+    if (Segment != mVtdUnitInformation[VtdIndex].Segment) {
+      continue;
+    }
+
+    //
+    // The device must have been registered on this unit (see RegisterPciDevice).
+    //
+    PciDataIndex = GetPciDataIndex (VtdIndex, Segment, SourceId);
+    if (PciDataIndex == (UINTN)-1) {
+      continue;
+    }
+
+    //
+    // A non-NULL ExtRootEntryTable selects the extended table format.
+    //
+    if (mVtdUnitInformation[VtdIndex].ExtRootEntryTable != 0) {
+      ExtRootEntry = &mVtdUnitInformation[VtdIndex].ExtRootEntryTable[SourceId.Index.RootIndex];
+      ExtContextEntryTable = (VTD_EXT_CONTEXT_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(ExtRootEntry->Bits.LowerContextTablePointerLo, ExtRootEntry->Bits.LowerContextTablePointerHi) ;
+      ThisExtContextEntry  = &ExtContextEntryTable[SourceId.Index.ContextIndex];
+      //
+      // AddressWidth == 0 means this context entry was never configured
+      // (Create*ContextEntry sets it to 0x2 or 0x3) - keep searching.
+      //
+      if (ThisExtContextEntry->Bits.AddressWidth == 0) {
+        continue;
+      }
+      *ExtContextEntry = ThisExtContextEntry;
+      *ContextEntry    = NULL;
+    } else {
+      RootEntry = &mVtdUnitInformation[VtdIndex].RootEntryTable[SourceId.Index.RootIndex];
+      ContextEntryTable = (VTD_CONTEXT_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(RootEntry->Bits.ContextTablePointerLo, RootEntry->Bits.ContextTablePointerHi) ;
+      ThisContextEntry  = &ContextEntryTable[SourceId.Index.ContextIndex];
+      if (ThisContextEntry->Bits.AddressWidth == 0) {
+        continue;
+      }
+      *ExtContextEntry = NULL;
+      *ContextEntry    = ThisContextEntry;
+    }
+
+    return VtdIndex;
+  }
+
+  return (UINTN)-1;
+}
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/TranslationTable.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/TranslationTable.c
new file mode 100644
index 000000000..37ca6e405
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/TranslationTable.c
@@ -0,0 +1,1112 @@
+/** @file
+
+ Copyright (c) 2017 - 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "DmaProtection.h"
+
+/**
+  Create extended context entry.
+
+  Forward declaration; the definition is in TranslationTableEx.c.
+
+  @param[in]  VtdIndex  The index of the VTd engine.
+
+  @retval EFI_SUCCESS           The extended context entry is created.
+  @retval EFI_OUT_OF_RESOURCES  Not enough resources to create extended context entry.
+**/
+EFI_STATUS
+CreateExtContextEntry (
+  IN UINTN  VtdIndex
+  );
+
+/**
+  Allocate zero pages.
+
+  Wrapper around AllocatePages () that zero-fills the allocation.
+
+  @param[in]  Pages  the number of pages.
+
+  @return the page address.
+  @retval NULL  Not enough resources to allocate pages.
+**/
+VOID *
+EFIAPI
+AllocateZeroPages (
+  IN UINTN  Pages
+  )
+{
+  VOID  *Addr;
+
+  Addr = AllocatePages (Pages);
+  if (Addr == NULL) {
+    return NULL;
+  }
+  ZeroMem (Addr, EFI_PAGES_TO_SIZE(Pages));
+  return Addr;
+}
+
+/**
+  Set second level paging entry attribute based upon IoMmuAccess.
+
+  Only the Read and Write bits of the entry are updated; all other bits
+  are left unchanged.
+
+  @param[in]  PtEntry      The paging entry.
+  @param[in]  IoMmuAccess  The IOMMU access.
+**/
+VOID
+SetSecondLevelPagingEntryAttribute (
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY  *PtEntry,
+  IN UINT64                         IoMmuAccess
+  )
+{
+  PtEntry->Bits.Read  = ((IoMmuAccess & EDKII_IOMMU_ACCESS_READ) != 0);
+  PtEntry->Bits.Write = ((IoMmuAccess & EDKII_IOMMU_ACCESS_WRITE) != 0);
+}
+
+/**
+  Create context entry.
+
+  Allocates the root entry table plus one context table per bus (up to the
+  highest registered bus number) in a single contiguous allocation, then
+  initializes a context entry for each registered device.
+
+  @param[in]  VtdIndex  The index of the VTd engine.
+
+  @retval EFI_SUCCESS           The context entry is created.
+  @retval EFI_OUT_OF_RESOURCES  Not enough resources to create context entry.
+  @retval EFI_UNSUPPORTED       The page-table type is not supported by this engine.
+**/
+EFI_STATUS
+CreateContextEntry (
+  IN UINTN  VtdIndex
+  )
+{
+  UINTN              Index;
+  VOID               *Buffer;
+  UINTN              RootPages;
+  UINTN              ContextPages;
+  VTD_ROOT_ENTRY     *RootEntry;
+  VTD_CONTEXT_ENTRY  *ContextEntryTable;
+  VTD_CONTEXT_ENTRY  *ContextEntry;
+  VTD_SOURCE_ID      *PciSourceId;
+  VTD_SOURCE_ID      SourceId;
+  UINTN              MaxBusNumber;
+  UINTN              EntryTablePages;
+
+  //
+  // The highest registered bus number bounds how many context tables are needed.
+  //
+  MaxBusNumber = 0;
+  for (Index = 0; Index < mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceDataNumber; Index++) {
+    PciSourceId = &mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceData[Index].PciSourceId;
+    if (PciSourceId->Bits.Bus > MaxBusNumber) {
+      MaxBusNumber = PciSourceId->Bits.Bus;
+    }
+  }
+  DEBUG ((DEBUG_INFO,"  MaxBusNumber - 0x%x\n", MaxBusNumber));
+
+  //
+  // One contiguous buffer: root table first, then context tables handed out
+  // bus-by-bus as root entries become present below.
+  //
+  RootPages = EFI_SIZE_TO_PAGES (sizeof (VTD_ROOT_ENTRY) * VTD_ROOT_ENTRY_NUMBER);
+  ContextPages = EFI_SIZE_TO_PAGES (sizeof (VTD_CONTEXT_ENTRY) * VTD_CONTEXT_ENTRY_NUMBER);
+  EntryTablePages = RootPages + ContextPages * (MaxBusNumber + 1);
+  Buffer = AllocateZeroPages (EntryTablePages);
+  if (Buffer == NULL) {
+    DEBUG ((DEBUG_INFO,"Could not Alloc Root Entry Table.. \n"));
+    return EFI_OUT_OF_RESOURCES;
+  }
+  mVtdUnitInformation[VtdIndex].RootEntryTable = (VTD_ROOT_ENTRY *)Buffer;
+  Buffer = (UINT8 *)Buffer + EFI_PAGES_TO_SIZE (RootPages);
+
+  for (Index = 0; Index < mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceDataNumber; Index++) {
+    PciSourceId = &mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceData[Index].PciSourceId;
+
+    SourceId.Bits.Bus = PciSourceId->Bits.Bus;
+    SourceId.Bits.Device = PciSourceId->Bits.Device;
+    SourceId.Bits.Function = PciSourceId->Bits.Function;
+
+    //
+    // First device seen on a bus claims the next context-table chunk.
+    //
+    RootEntry = &mVtdUnitInformation[VtdIndex].RootEntryTable[SourceId.Index.RootIndex];
+    if (RootEntry->Bits.Present == 0) {
+      RootEntry->Bits.ContextTablePointerLo  = (UINT32) RShiftU64 ((UINT64)(UINTN)Buffer, 12);
+      RootEntry->Bits.ContextTablePointerHi  = (UINT32) RShiftU64 ((UINT64)(UINTN)Buffer, 32);
+      RootEntry->Bits.Present = 1;
+      Buffer = (UINT8 *)Buffer + EFI_PAGES_TO_SIZE (ContextPages);
+    }
+
+    //
+    // The context entry is left not-present here; translation is enabled
+    // elsewhere once the second-level tables are ready.
+    //
+    ContextEntryTable = (VTD_CONTEXT_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(RootEntry->Bits.ContextTablePointerLo, RootEntry->Bits.ContextTablePointerHi) ;
+    ContextEntry = &ContextEntryTable[SourceId.Index.ContextIndex];
+    ContextEntry->Bits.TranslationType = 0;
+    ContextEntry->Bits.FaultProcessingDisable = 0;
+    ContextEntry->Bits.Present = 0;
+
+    DEBUG ((DEBUG_INFO,"Source: S%04x B%02x D%02x F%02x\n", mVtdUnitInformation[VtdIndex].Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+
+    //
+    // SAGAW BIT3 = 5-level paging supported, BIT2 = 4-level supported.
+    // Prefer 4-level when the host address width fits in 48 bits.
+    //
+    mVtdUnitInformation[VtdIndex].Is5LevelPaging = FALSE;
+    if ((mVtdUnitInformation[VtdIndex].CapReg.Bits.SAGAW & BIT3) != 0) {
+      mVtdUnitInformation[VtdIndex].Is5LevelPaging = TRUE;
+      if ((mAcpiDmarTable->HostAddressWidth <= 48) &&
+          ((mVtdUnitInformation[VtdIndex].CapReg.Bits.SAGAW & BIT2) != 0)) {
+        mVtdUnitInformation[VtdIndex].Is5LevelPaging = FALSE;
+      }
+    } else if ((mVtdUnitInformation[VtdIndex].CapReg.Bits.SAGAW & BIT2) == 0) {
+      DEBUG((DEBUG_ERROR, "!!!! Page-table type is not supported on VTD %d !!!!\n", VtdIndex));
+      return EFI_UNSUPPORTED;
+    }
+
+    if (mVtdUnitInformation[VtdIndex].Is5LevelPaging) {
+      ContextEntry->Bits.AddressWidth = 0x3;
+      DEBUG((DEBUG_INFO, "Using 5-level page-table on VTD %d\n", VtdIndex));
+    } else {
+      ContextEntry->Bits.AddressWidth = 0x2;
+      DEBUG((DEBUG_INFO, "Using 4-level page-table on VTD %d\n", VtdIndex));
+    }
+  }
+
+  FlushPageTableMemory (VtdIndex, (UINTN)mVtdUnitInformation[VtdIndex].RootEntryTable, EFI_PAGES_TO_SIZE(EntryTablePages));
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Create second level paging entry table.
+
+  Maps the range [MemoryBase, MemoryLimit) using 2MB leaf pages, allocating
+  intermediate (Lvl5/Lvl4/Lvl3) table pages on demand. May be called again
+  with an existing SecondLevelPagingEntry to extend the mapping to another
+  range.
+
+  @param[in]  VtdIndex               The index of the VTd engine.
+  @param[in]  SecondLevelPagingEntry The second level paging entry (NULL to allocate a new top level).
+  @param[in]  MemoryBase             The base of the memory.
+  @param[in]  MemoryLimit            The limit of the memory.
+  @param[in]  IoMmuAccess            The IOMMU access.
+  @param[in]  Is5LevelPaging         If it is the 5 level paging.
+
+  @return The second level paging entry.
+**/
+VTD_SECOND_LEVEL_PAGING_ENTRY *
+CreateSecondLevelPagingEntryTable (
+  IN UINTN                          VtdIndex,
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY  *SecondLevelPagingEntry,
+  IN UINT64                         MemoryBase,
+  IN UINT64                         MemoryLimit,
+  IN UINT64                         IoMmuAccess,
+  IN BOOLEAN                        Is5LevelPaging
+  )
+{
+  UINTN                          Index5;
+  UINTN                          Index4;
+  UINTN                          Index3;
+  UINTN                          Index2;
+  UINTN                          Lvl5Start;
+  UINTN                          Lvl5End;
+  UINTN                          Lvl4PagesStart;
+  UINTN                          Lvl4PagesEnd;
+  UINTN                          Lvl4Start;
+  UINTN                          Lvl4End;
+  UINTN                          Lvl3Start;
+  UINTN                          Lvl3End;
+  VTD_SECOND_LEVEL_PAGING_ENTRY  *Lvl5PtEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY  *Lvl4PtEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY  *Lvl3PtEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY  *Lvl2PtEntry;
+  UINT64                         BaseAddress;
+  UINT64                         EndAddress;
+
+  if (MemoryLimit == 0) {
+    return NULL;
+  }
+
+  Lvl4PagesStart = 0;
+  Lvl4PagesEnd   = 0;
+  Lvl4PtEntry    = NULL;
+  Lvl5PtEntry    = NULL;
+
+  //
+  // Align the range to 2MB leaf-page boundaries.
+  //
+  BaseAddress = ALIGN_VALUE_LOW(MemoryBase, SIZE_2MB);
+  EndAddress = ALIGN_VALUE_UP(MemoryLimit, SIZE_2MB);
+  DEBUG ((DEBUG_INFO,"CreateSecondLevelPagingEntryTable: BaseAddress - 0x%016lx, EndAddress - 0x%016lx\n", BaseAddress, EndAddress));
+
+  if (SecondLevelPagingEntry == NULL) {
+    SecondLevelPagingEntry = AllocateZeroPages (1);
+    if (SecondLevelPagingEntry == NULL) {
+      DEBUG ((DEBUG_ERROR,"Could not Alloc LVL4 or LVL5 PT. \n"));
+      return NULL;
+    }
+    FlushPageTableMemory (VtdIndex, (UINTN)SecondLevelPagingEntry, EFI_PAGES_TO_SIZE(1));
+  }
+
+  //
+  // If no access is needed, just create not present entry.
+  //
+  if (IoMmuAccess == 0) {
+    return SecondLevelPagingEntry;
+  }
+
+  if (Is5LevelPaging) {
+    //
+    // Lvl4Pages* track the absolute Lvl4 index range ((Lvl5 index << 9) | Lvl4
+    // index) so the Lvl4 window can be recomputed for each Lvl5 entry below.
+    //
+    Lvl5Start = RShiftU64 (BaseAddress, 48) & 0x1FF;
+    Lvl5End = RShiftU64 (EndAddress - 1, 48) & 0x1FF;
+    DEBUG ((DEBUG_INFO,"  Lvl5Start - 0x%x, Lvl5End - 0x%x\n", Lvl5Start, Lvl5End));
+
+    Lvl4Start = RShiftU64 (BaseAddress, 39) & 0x1FF;
+    Lvl4End = RShiftU64 (EndAddress - 1, 39) & 0x1FF;
+
+    Lvl4PagesStart = (Lvl5Start<<9) | Lvl4Start;
+    Lvl4PagesEnd = (Lvl5End<<9) | Lvl4End;
+    DEBUG ((DEBUG_INFO,"  Lvl4PagesStart - 0x%x, Lvl4PagesEnd - 0x%x\n", Lvl4PagesStart, Lvl4PagesEnd));
+
+    Lvl5PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)SecondLevelPagingEntry;
+  } else {
+    //
+    // 4-level: the top-level table IS the Lvl4 table; run the Lvl5 loop once.
+    //
+    Lvl5Start = RShiftU64 (BaseAddress, 48) & 0x1FF;
+    Lvl5End = Lvl5Start;
+
+    Lvl4Start = RShiftU64 (BaseAddress, 39) & 0x1FF;
+    Lvl4End = RShiftU64 (EndAddress - 1, 39) & 0x1FF;
+    DEBUG ((DEBUG_INFO,"  Lvl4Start - 0x%x, Lvl4End - 0x%x\n", Lvl4Start, Lvl4End));
+
+    Lvl4PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)SecondLevelPagingEntry;
+  }
+
+  for (Index5 = Lvl5Start; Index5 <= Lvl5End; Index5++) {
+    if (Is5LevelPaging) {
+      if (Lvl5PtEntry[Index5].Uint64 == 0) {
+        Lvl5PtEntry[Index5].Uint64 = (UINT64)(UINTN)AllocateZeroPages (1);
+        if (Lvl5PtEntry[Index5].Uint64 == 0) {
+          DEBUG ((DEBUG_ERROR,"!!!!!! ALLOCATE LVL4 PAGE FAIL (0x%x)!!!!!!\n", Index5));
+          ASSERT(FALSE);
+          return NULL;
+        }
+        FlushPageTableMemory (VtdIndex, (UINTN)Lvl5PtEntry[Index5].Uint64, SIZE_4KB);
+        SetSecondLevelPagingEntryAttribute (&Lvl5PtEntry[Index5], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+      }
+      //
+      // Compute this Lvl5 entry's Lvl4 index window from the absolute range.
+      //
+      Lvl4Start = Lvl4PagesStart & 0x1FF;
+      if (((Index5+1)<<9) > Lvl4PagesEnd) {
+        Lvl4End = SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY) - 1;; // NOTE(review): stray extra ';'
+        Lvl4PagesStart = (Index5+1)<<9;
+      } else {
+        Lvl4End = Lvl4PagesEnd & 0x1FF;
+      }
+      DEBUG ((DEBUG_INFO,"  Lvl5(0x%x): Lvl4Start - 0x%x, Lvl4End - 0x%x\n", Index5, Lvl4Start, Lvl4End));
+      Lvl4PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(Lvl5PtEntry[Index5].Bits.AddressLo, Lvl5PtEntry[Index5].Bits.AddressHi);
+    }
+
+    for (Index4 = Lvl4Start; Index4 <= Lvl4End; Index4++) {
+      if (Lvl4PtEntry[Index4].Uint64 == 0) {
+        Lvl4PtEntry[Index4].Uint64 = (UINT64)(UINTN)AllocateZeroPages (1);
+        if (Lvl4PtEntry[Index4].Uint64 == 0) {
+          DEBUG ((DEBUG_ERROR,"!!!!!! ALLOCATE LVL4 PAGE FAIL (0x%x)!!!!!!\n", Index4));
+          ASSERT(FALSE);
+          return NULL;
+        }
+        FlushPageTableMemory (VtdIndex, (UINTN)Lvl4PtEntry[Index4].Uint64, SIZE_4KB);
+        SetSecondLevelPagingEntryAttribute (&Lvl4PtEntry[Index4], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+      }
+
+      Lvl3Start = RShiftU64 (BaseAddress, 30) & 0x1FF;
+      if (ALIGN_VALUE_LOW(BaseAddress + SIZE_1GB, SIZE_1GB) <= EndAddress) {
+        Lvl3End = SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY) - 1;
+      } else {
+        Lvl3End = RShiftU64 (EndAddress - 1, 30) & 0x1FF;
+      }
+      DEBUG ((DEBUG_INFO,"  Lvl4(0x%x): Lvl3Start - 0x%x, Lvl3End - 0x%x\n", Index4, Lvl3Start, Lvl3End));
+
+      Lvl3PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(Lvl4PtEntry[Index4].Bits.AddressLo, Lvl4PtEntry[Index4].Bits.AddressHi);
+      for (Index3 = Lvl3Start; Index3 <= Lvl3End; Index3++) {
+        if (Lvl3PtEntry[Index3].Uint64 == 0) {
+          Lvl3PtEntry[Index3].Uint64 = (UINT64)(UINTN)AllocateZeroPages (1);
+          if (Lvl3PtEntry[Index3].Uint64 == 0) {
+            DEBUG ((DEBUG_ERROR,"!!!!!! ALLOCATE LVL3 PAGE FAIL (0x%x, 0x%x)!!!!!!\n", Index4, Index3));
+            ASSERT(FALSE);
+            return NULL;
+          }
+          FlushPageTableMemory (VtdIndex, (UINTN)Lvl3PtEntry[Index3].Uint64, SIZE_4KB);
+          SetSecondLevelPagingEntryAttribute (&Lvl3PtEntry[Index3], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+        }
+
+        //
+        // Fill the leaf (Lvl2) table: each entry maps one 2MB page with the
+        // requested access rights.
+        //
+        Lvl2PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(Lvl3PtEntry[Index3].Bits.AddressLo, Lvl3PtEntry[Index3].Bits.AddressHi);
+        for (Index2 = 0; Index2 < SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY); Index2++) {
+          Lvl2PtEntry[Index2].Uint64 = BaseAddress;
+          SetSecondLevelPagingEntryAttribute (&Lvl2PtEntry[Index2], IoMmuAccess);
+          Lvl2PtEntry[Index2].Bits.PageSize = 1;
+          BaseAddress += SIZE_2MB;
+          if (BaseAddress >= MemoryLimit) {
+            break;
+          }
+        }
+        FlushPageTableMemory (VtdIndex, (UINTN)Lvl2PtEntry, SIZE_4KB);
+        if (BaseAddress >= MemoryLimit) {
+          break;
+        }
+      }
+      FlushPageTableMemory (VtdIndex, (UINTN)&Lvl3PtEntry[Lvl3Start], (UINTN)&Lvl3PtEntry[Lvl3End + 1] - (UINTN)&Lvl3PtEntry[Lvl3Start]);
+      if (BaseAddress >= MemoryLimit) {
+        break;
+      }
+    }
+    FlushPageTableMemory (VtdIndex, (UINTN)&Lvl4PtEntry[Lvl4Start], (UINTN)&Lvl4PtEntry[Lvl4End + 1] - (UINTN)&Lvl4PtEntry[Lvl4Start]);
+  }
+  FlushPageTableMemory (VtdIndex, (UINTN)&Lvl5PtEntry[Lvl5Start], (UINTN)&Lvl5PtEntry[Lvl5End + 1] - (UINTN)&Lvl5PtEntry[Lvl5Start]);
+
+  return SecondLevelPagingEntry;
+}
+
+/**
+  Create second level paging entry.
+
+  Builds one page-table hierarchy covering [0, mBelow4GMemoryLimit) and,
+  when memory exists above 4GB, extends it to [4GB, mAbove4GMemoryLimit).
+
+  @param[in]  VtdIndex        The index of the VTd engine.
+  @param[in]  IoMmuAccess     The IOMMU access.
+  @param[in]  Is5LevelPaging  If it is the 5 level paging.
+
+  @return The second level paging entry.
+**/
+VTD_SECOND_LEVEL_PAGING_ENTRY *
+CreateSecondLevelPagingEntry (
+  IN UINTN    VtdIndex,
+  IN UINT64   IoMmuAccess,
+  IN BOOLEAN  Is5LevelPaging
+  )
+{
+  VTD_SECOND_LEVEL_PAGING_ENTRY  *SecondLevelPagingEntry;
+
+  //
+  // Passing NULL makes the first call allocate the top-level table; the
+  // second call reuses it to add the above-4GB range.
+  //
+  SecondLevelPagingEntry = NULL;
+  SecondLevelPagingEntry = CreateSecondLevelPagingEntryTable (VtdIndex, SecondLevelPagingEntry, 0, mBelow4GMemoryLimit, IoMmuAccess, Is5LevelPaging);
+  if (SecondLevelPagingEntry == NULL) {
+    return NULL;
+  }
+
+  if (mAbove4GMemoryLimit != 0) {
+    ASSERT (mAbove4GMemoryLimit > BASE_4GB);
+    SecondLevelPagingEntry = CreateSecondLevelPagingEntryTable (VtdIndex, SecondLevelPagingEntry, SIZE_4GB, mAbove4GMemoryLimit, IoMmuAccess, Is5LevelPaging);
+    if (SecondLevelPagingEntry == NULL) {
+      return NULL;
+    }
+  }
+
+  return SecondLevelPagingEntry;
+}
+
+/**
+  Setup VTd translation table.
+
+  For each VTd engine, creates either legacy or extended context entries
+  depending on the engine's extended capability bits.
+
+  @retval EFI_SUCCESS           Setup translation table successfully.
+  @retval EFI_OUT_OF_RESOURCES  Setup translation table fail.
+  @retval EFI_UNSUPPORTED       The engine reports an unsupported capability combination.
+**/
+EFI_STATUS
+SetupTranslationTable (
+  VOID
+  )
+{
+  EFI_STATUS  Status;
+  UINTN       Index;
+
+  for (Index = 0; Index < mVtdUnitNumber; Index++) {
+    DEBUG((DEBUG_INFO, "CreateContextEntry - %d\n", Index));
+
+    if (mVtdUnitInformation[Index].ECapReg.Bits.SMTS) {
+      //
+      // Scalable Mode: bit 24 must be zero per this driver's expectations.
+      //
+      if (mVtdUnitInformation[Index].ECapReg.Bits.DEP_24) {
+        DEBUG ((DEBUG_ERROR,"ECapReg.bit24 is not zero\n"));
+        ASSERT(FALSE);
+        Status = EFI_UNSUPPORTED;
+      } else {
+        Status = CreateContextEntry (Index);
+      }
+    } else {
+      if (mVtdUnitInformation[Index].ECapReg.Bits.DEP_24) {
+        //
+        // To be compatible with previous VTd engines:
+        // bit 24 was the ECS (Extended Context Support) bit.
+        //
+        Status = CreateExtContextEntry (Index);
+      } else {
+        Status = CreateContextEntry (Index);
+      }
+    }
+
+    if (EFI_ERROR (Status)) {
+      return Status;
+    }
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Dump DMAR second level paging entry.
+
+  Walks the second level page table from the top level (Lvl5 when 5-level
+  paging is enabled, otherwise Lvl4) down to Lvl1, printing every non-zero
+  entry at DEBUG_VERBOSE level. A Lvl2 entry with PageSize set is a 2MB
+  super-page and has no Lvl1 table beneath it.
+
+  @param[in] SecondLevelPagingEntry The second level paging entry.
+  @param[in] Is5LevelPaging If it is the 5 level paging.
+**/
+VOID
+DumpSecondLevelPagingEntry (
+  IN VOID *SecondLevelPagingEntry,
+  IN BOOLEAN Is5LevelPaging
+  )
+{
+  UINTN Index5;
+  UINTN Index4;
+  UINTN Index3;
+  UINTN Index2;
+  UINTN Index1;
+  UINTN Lvl5IndexEnd;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl5PtEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl4PtEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl3PtEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl2PtEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl1PtEntry;
+
+  DEBUG ((DEBUG_VERBOSE,"================\n"));
+  DEBUG ((DEBUG_VERBOSE,"DMAR Second Level Page Table:\n"));
+  DEBUG ((DEBUG_VERBOSE,"SecondLevelPagingEntry Base - 0x%x, Is5LevelPaging - %d\n", SecondLevelPagingEntry, Is5LevelPaging));
+
+  //
+  // With 4-level paging the outer loop runs once and the root table is
+  // treated directly as the Lvl4 table.
+  //
+  Lvl5IndexEnd = Is5LevelPaging ? SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY) : 1;
+  Lvl4PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)SecondLevelPagingEntry;
+  Lvl5PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)SecondLevelPagingEntry;
+
+  for (Index5 = 0; Index5 < Lvl5IndexEnd; Index5++) {
+    if (Is5LevelPaging) {
+      if (Lvl5PtEntry[Index5].Uint64 != 0) {
+        DEBUG ((DEBUG_VERBOSE," Lvl5Pt Entry(0x%03x) - 0x%016lx\n", Index5, Lvl5PtEntry[Index5].Uint64));
+      }
+      if (Lvl5PtEntry[Index5].Uint64 == 0) {
+        continue;
+      }
+      Lvl4PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(Lvl5PtEntry[Index5].Bits.AddressLo, Lvl5PtEntry[Index5].Bits.AddressHi);
+    }
+
+    for (Index4 = 0; Index4 < SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY); Index4++) {
+      if (Lvl4PtEntry[Index4].Uint64 != 0) {
+        DEBUG ((DEBUG_VERBOSE," Lvl4Pt Entry(0x%03x) - 0x%016lx\n", Index4, Lvl4PtEntry[Index4].Uint64));
+      }
+      if (Lvl4PtEntry[Index4].Uint64 == 0) {
+        continue;
+      }
+      Lvl3PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(Lvl4PtEntry[Index4].Bits.AddressLo, Lvl4PtEntry[Index4].Bits.AddressHi);
+      for (Index3 = 0; Index3 < SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY); Index3++) {
+        if (Lvl3PtEntry[Index3].Uint64 != 0) {
+          DEBUG ((DEBUG_VERBOSE," Lvl3Pt Entry(0x%03x) - 0x%016lx\n", Index3, Lvl3PtEntry[Index3].Uint64));
+        }
+        if (Lvl3PtEntry[Index3].Uint64 == 0) {
+          continue;
+        }
+
+        Lvl2PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(Lvl3PtEntry[Index3].Bits.AddressLo, Lvl3PtEntry[Index3].Bits.AddressHi);
+        for (Index2 = 0; Index2 < SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY); Index2++) {
+          if (Lvl2PtEntry[Index2].Uint64 != 0) {
+            DEBUG ((DEBUG_VERBOSE," Lvl2Pt Entry(0x%03x) - 0x%016lx\n", Index2, Lvl2PtEntry[Index2].Uint64));
+          }
+          if (Lvl2PtEntry[Index2].Uint64 == 0) {
+            continue;
+          }
+          // PageSize == 0 means the entry references an Lvl1 table;
+          // otherwise it maps a 2MB page directly and is not descended.
+          if (Lvl2PtEntry[Index2].Bits.PageSize == 0) {
+            Lvl1PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(Lvl2PtEntry[Index2].Bits.AddressLo, Lvl2PtEntry[Index2].Bits.AddressHi);
+            for (Index1 = 0; Index1 < SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY); Index1++) {
+              if (Lvl1PtEntry[Index1].Uint64 != 0) {
+                DEBUG ((DEBUG_VERBOSE," Lvl1Pt Entry(0x%03x) - 0x%016lx\n", Index1, Lvl1PtEntry[Index1].Uint64));
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+  DEBUG ((DEBUG_VERBOSE,"================\n"));
+}
+
+/**
+  Flush the IOTLB of a VTd engine when its context or page entries were
+  modified, then clear both dirty markers.
+
+  @param VtdIndex The VTd engine index.
+**/
+VOID
+InvalidatePageEntry (
+  IN UINTN VtdIndex
+  )
+{
+  BOOLEAN NeedInvalidate;
+
+  NeedInvalidate = (BOOLEAN) (mVtdUnitInformation[VtdIndex].HasDirtyPages || mVtdUnitInformation[VtdIndex].HasDirtyContext);
+  if (NeedInvalidate) {
+    InvalidateVtdIOTLBGlobal (VtdIndex);
+  }
+
+  mVtdUnitInformation[VtdIndex].HasDirtyPages = FALSE;
+  mVtdUnitInformation[VtdIndex].HasDirtyContext = FALSE;
+}
+
+//
+// Second level paging entry bit fields: Read / Write / Execute permissions,
+// Extended Memory Type, and Transient Mapping.
+//
+#define VTD_PG_R BIT0
+#define VTD_PG_W BIT1
+#define VTD_PG_X BIT2
+#define VTD_PG_EMT (BIT3 | BIT4 | BIT5)
+#define VTD_PG_TM (BIT62)
+
+// Page Size: set in an Lvl3/Lvl2 entry to map a 1GB/2MB super-page.
+#define VTD_PG_PS BIT7
+
+// Attribute bits copied from a parent entry into each child entry when a
+// super-page is split. (Name is a historical typo for "PROPAGATE".)
+#define PAGE_PROGATE_BITS (VTD_PG_TM | VTD_PG_EMT | VTD_PG_W | VTD_PG_R)
+
+// Offset masks within a 4KB / 2MB / 1GB page.
+#define PAGING_4K_MASK 0xFFF
+#define PAGING_2M_MASK 0x1FFFFF
+#define PAGING_1G_MASK 0x3FFFFFFF
+
+// Each paging level indexes 512 (9-bit) entries.
+#define PAGING_VTD_INDEX_MASK 0x1FF
+
+// Address field masks for entries pointing to 4KB tables / 2MB / 1GB pages.
+#define PAGING_4K_ADDRESS_MASK_64 0x000FFFFFFFFFF000ull
+#define PAGING_2M_ADDRESS_MASK_64 0x000FFFFFFFE00000ull
+#define PAGING_1G_ADDRESS_MASK_64 0x000FFFFFC0000000ull
+
+//
+// Granularity of a page entry. PageNone means "no/unknown mapping".
+//
+typedef enum {
+  PageNone,
+  Page4K,
+  Page2M,
+  Page1G,
+} PAGE_ATTRIBUTE;
+
+//
+// Maps a PAGE_ATTRIBUTE to the region length it covers and the mask that
+// extracts the page-aligned address from an entry of that granularity.
+//
+typedef struct {
+  PAGE_ATTRIBUTE Attribute;
+  UINT64 Length;
+  UINT64 AddressMask;
+} PAGE_ATTRIBUTE_TABLE;
+
+PAGE_ATTRIBUTE_TABLE mPageAttributeTable[] = {
+  {Page4K, SIZE_4KB, PAGING_4K_ADDRESS_MASK_64},
+  {Page2M, SIZE_2MB, PAGING_2M_ADDRESS_MASK_64},
+  {Page1G, SIZE_1GB, PAGING_1G_ADDRESS_MASK_64},
+};
+
+/**
+  Return length according to page attributes.
+
+  @param[in] PageAttribute The page attribute of the page entry.
+
+  @return The length covered by an entry of that granularity,
+          or 0 when the attribute is unknown (e.g. PageNone).
+**/
+UINTN
+PageAttributeToLength (
+  IN PAGE_ATTRIBUTE PageAttribute
+  )
+{
+  UINTN Count;
+  UINTN Index;
+
+  Count = sizeof(mPageAttributeTable)/sizeof(mPageAttributeTable[0]);
+  for (Index = 0; Index < Count; Index++) {
+    if (mPageAttributeTable[Index].Attribute == PageAttribute) {
+      return (UINTN)mPageAttributeTable[Index].Length;
+    }
+  }
+
+  return 0;
+}
+
+/**
+  Return page table entry to match the address.
+
+  Missing intermediate tables (Lvl5/Lvl4/Lvl3) are allocated on demand,
+  flushed, and marked read/write. A missing Lvl2 entry is created directly
+  as a 2MB super-page mapping the requested address.
+
+  @param[in] VtdIndex The index used to identify a VTd engine.
+  @param[in] SecondLevelPagingEntry The second level paging entry in VTd table for the device.
+  @param[in] Address The address to be checked.
+  @param[in] Is5LevelPaging If it is the 5 level paging.
+  @param[out] PageAttribute The page attribute of the page entry.
+
+  @return The page entry, or NULL on allocation failure / unmapped address.
+**/
+VOID *
+GetSecondLevelPageTableEntry (
+  IN UINTN VtdIndex,
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY *SecondLevelPagingEntry,
+  IN PHYSICAL_ADDRESS Address,
+  IN BOOLEAN Is5LevelPaging,
+  OUT PAGE_ATTRIBUTE *PageAttribute
+  )
+{
+  UINTN Index1;
+  UINTN Index2;
+  UINTN Index3;
+  UINTN Index4;
+  UINTN Index5;
+  UINT64 *L1PageTable;
+  UINT64 *L2PageTable;
+  UINT64 *L3PageTable;
+  UINT64 *L4PageTable;
+  UINT64 *L5PageTable;
+
+  //
+  // Use RShiftU64 at every level: a plain (UINTN)Address >> n would truncate
+  // addresses above 4GB when UINTN is 32-bit.
+  //
+  Index5 = ((UINTN)RShiftU64 (Address, 48)) & PAGING_VTD_INDEX_MASK;
+  Index4 = ((UINTN)RShiftU64 (Address, 39)) & PAGING_VTD_INDEX_MASK;
+  Index3 = ((UINTN)RShiftU64 (Address, 30)) & PAGING_VTD_INDEX_MASK;
+  Index2 = ((UINTN)RShiftU64 (Address, 21)) & PAGING_VTD_INDEX_MASK;
+  Index1 = ((UINTN)RShiftU64 (Address, 12)) & PAGING_VTD_INDEX_MASK;
+
+  if (Is5LevelPaging) {
+    L5PageTable = (UINT64 *)SecondLevelPagingEntry;
+    if (L5PageTable[Index5] == 0) {
+      L5PageTable[Index5] = (UINT64)(UINTN)AllocateZeroPages (1);
+      if (L5PageTable[Index5] == 0) {
+        // Fixed: report the Lvl5 index (was mistakenly printing Index4).
+        DEBUG ((DEBUG_ERROR,"!!!!!! ALLOCATE LVL5 PAGE FAIL (0x%x)!!!!!!\n", Index5));
+        ASSERT(FALSE);
+        *PageAttribute = PageNone;
+        return NULL;
+      }
+      FlushPageTableMemory (VtdIndex, (UINTN)L5PageTable[Index5], SIZE_4KB);
+      SetSecondLevelPagingEntryAttribute ((VTD_SECOND_LEVEL_PAGING_ENTRY *)&L5PageTable[Index5], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+      FlushPageTableMemory (VtdIndex, (UINTN)&L5PageTable[Index5], sizeof(L5PageTable[Index5]));
+    }
+    L4PageTable = (UINT64 *)(UINTN)(L5PageTable[Index5] & PAGING_4K_ADDRESS_MASK_64);
+  } else {
+    L4PageTable = (UINT64 *)SecondLevelPagingEntry;
+  }
+
+  if (L4PageTable[Index4] == 0) {
+    L4PageTable[Index4] = (UINT64)(UINTN)AllocateZeroPages (1);
+    if (L4PageTable[Index4] == 0) {
+      DEBUG ((DEBUG_ERROR,"!!!!!! ALLOCATE LVL4 PAGE FAIL (0x%x)!!!!!!\n", Index4));
+      ASSERT(FALSE);
+      *PageAttribute = PageNone;
+      return NULL;
+    }
+    FlushPageTableMemory (VtdIndex, (UINTN)L4PageTable[Index4], SIZE_4KB);
+    SetSecondLevelPagingEntryAttribute ((VTD_SECOND_LEVEL_PAGING_ENTRY *)&L4PageTable[Index4], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+    FlushPageTableMemory (VtdIndex, (UINTN)&L4PageTable[Index4], sizeof(L4PageTable[Index4]));
+  }
+
+  L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & PAGING_4K_ADDRESS_MASK_64);
+  if (L3PageTable[Index3] == 0) {
+    L3PageTable[Index3] = (UINT64)(UINTN)AllocateZeroPages (1);
+    if (L3PageTable[Index3] == 0) {
+      DEBUG ((DEBUG_ERROR,"!!!!!! ALLOCATE LVL3 PAGE FAIL (0x%x, 0x%x)!!!!!!\n", Index4, Index3));
+      ASSERT(FALSE);
+      *PageAttribute = PageNone;
+      return NULL;
+    }
+    FlushPageTableMemory (VtdIndex, (UINTN)L3PageTable[Index3], SIZE_4KB);
+    SetSecondLevelPagingEntryAttribute ((VTD_SECOND_LEVEL_PAGING_ENTRY *)&L3PageTable[Index3], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+    FlushPageTableMemory (VtdIndex, (UINTN)&L3PageTable[Index3], sizeof(L3PageTable[Index3]));
+  }
+  if ((L3PageTable[Index3] & VTD_PG_PS) != 0) {
+    // 1G
+    *PageAttribute = Page1G;
+    return &L3PageTable[Index3];
+  }
+
+  L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & PAGING_4K_ADDRESS_MASK_64);
+  if (L2PageTable[Index2] == 0) {
+    //
+    // Create the mapping directly as a 2MB super-page; it can be split to
+    // 4KB later by SplitSecondLevelPage if needed.
+    //
+    L2PageTable[Index2] = Address & PAGING_2M_ADDRESS_MASK_64;
+    SetSecondLevelPagingEntryAttribute ((VTD_SECOND_LEVEL_PAGING_ENTRY *)&L2PageTable[Index2], 0);
+    L2PageTable[Index2] |= VTD_PG_PS;
+    FlushPageTableMemory (VtdIndex, (UINTN)&L2PageTable[Index2], sizeof(L2PageTable[Index2]));
+  }
+  if ((L2PageTable[Index2] & VTD_PG_PS) != 0) {
+    // 2M
+    *PageAttribute = Page2M;
+    return &L2PageTable[Index2];
+  }
+
+  // 4k
+  L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & PAGING_4K_ADDRESS_MASK_64);
+  if ((L1PageTable[Index1] == 0) && (Address != 0)) {
+    *PageAttribute = PageNone;
+    return NULL;
+  }
+  *PageAttribute = Page4K;
+  return &L1PageTable[Index1];
+}
+
+/**
+  Modify memory attributes of page entry.
+
+  Applies IoMmuAccess to the entry, flushes the updated entry for the
+  engine, and reports whether the entry value actually changed.
+
+  @param[in] VtdIndex The index used to identify a VTd engine.
+  @param[in] PageEntry The page entry.
+  @param[in] IoMmuAccess The IOMMU access.
+  @param[out] IsModified TRUE means page table modified. FALSE means page table not modified.
+**/
+VOID
+ConvertSecondLevelPageEntryAttribute (
+  IN UINTN VtdIndex,
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY *PageEntry,
+  IN UINT64 IoMmuAccess,
+  OUT BOOLEAN *IsModified
+  )
+{
+  UINT64 OldValue;
+
+  OldValue = PageEntry->Uint64;
+  SetSecondLevelPagingEntryAttribute (PageEntry, IoMmuAccess);
+  FlushPageTableMemory (VtdIndex, (UINTN)PageEntry, sizeof(*PageEntry));
+
+  *IsModified = (BOOLEAN) (OldValue != PageEntry->Uint64);
+  if (*IsModified) {
+    DEBUG ((DEBUG_VERBOSE, "ConvertSecondLevelPageEntryAttribute 0x%lx", OldValue));
+    DEBUG ((DEBUG_VERBOSE, "->0x%lx\n", PageEntry->Uint64));
+  }
+}
+
+/**
+  This function returns if there is need to split page entry.
+
+  @param[in] BaseAddress The base address to be checked.
+  @param[in] Length The length to be checked.
+  @param[in] PageAttribute The page attribute of the page entry.
+
+  @retval PageNone No split needed: the range covers the whole aligned entry.
+  @retval Page4K Entry must be split down to 4KB granularity.
+  @retval Page2M Entry must be split down to 2MB granularity.
+**/
+PAGE_ATTRIBUTE
+NeedSplitPage (
+  IN PHYSICAL_ADDRESS BaseAddress,
+  IN UINT64 Length,
+  IN PAGE_ATTRIBUTE PageAttribute
+  )
+{
+  UINT64 EntryLength;
+
+  EntryLength = PageAttributeToLength (PageAttribute);
+  if ((Length >= EntryLength) && ((BaseAddress & (EntryLength - 1)) == 0)) {
+    // The request is aligned to and covers the full entry - no split needed.
+    return PageNone;
+  }
+
+  if ((Length < SIZE_2MB) || ((BaseAddress & PAGING_2M_MASK) != 0)) {
+    return Page4K;
+  }
+
+  return Page2M;
+}
+
+/**
+  This function splits one page entry to small page entries.
+
+  A 2MB entry is split into a 4KB table; a 1GB entry is split into a 2MB
+  table. The new table inherits the parent's propagated attribute bits and
+  is flushed before the parent entry is re-pointed at it.
+
+  @param[in] VtdIndex The index used to identify a VTd engine.
+  @param[in] PageEntry The page entry to be split.
+  @param[in] PageAttribute The page attribute of the page entry.
+  @param[in] SplitAttribute How to split the page entry.
+
+  @retval RETURN_SUCCESS The page entry is split.
+  @retval RETURN_UNSUPPORTED The page entry does not support to be split.
+  @retval RETURN_OUT_OF_RESOURCES No resource to split page entry.
+**/
+RETURN_STATUS
+SplitSecondLevelPage (
+  IN UINTN VtdIndex,
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY *PageEntry,
+  IN PAGE_ATTRIBUTE PageAttribute,
+  IN PAGE_ATTRIBUTE SplitAttribute
+  )
+{
+  UINT64 BaseAddress;
+  UINT64 *NewPageEntry;
+  UINTN Index;
+
+  ASSERT (PageAttribute == Page2M || PageAttribute == Page1G);
+
+  if (PageAttribute == Page2M) {
+    //
+    // Split 2M to 4K
+    //
+    ASSERT (SplitAttribute == Page4K);
+    if (SplitAttribute == Page4K) {
+      NewPageEntry = AllocateZeroPages (1);
+      DEBUG ((DEBUG_VERBOSE, "Split - 0x%x\n", NewPageEntry));
+      if (NewPageEntry == NULL) {
+        return RETURN_OUT_OF_RESOURCES;
+      }
+      BaseAddress = PageEntry->Uint64 & PAGING_2M_ADDRESS_MASK_64;
+      // Each 4KB child maps a slice of the old 2MB range, inheriting the
+      // parent's propagated permission/type bits.
+      for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
+        NewPageEntry[Index] = (BaseAddress + SIZE_4KB * Index) | (PageEntry->Uint64 & PAGE_PROGATE_BITS);
+      }
+      // Flush the child table before publishing it through the parent entry.
+      FlushPageTableMemory (VtdIndex, (UINTN)NewPageEntry, SIZE_4KB);
+
+      PageEntry->Uint64 = (UINT64)(UINTN)NewPageEntry;
+      SetSecondLevelPagingEntryAttribute (PageEntry, EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+      FlushPageTableMemory (VtdIndex, (UINTN)PageEntry, sizeof(*PageEntry));
+      return RETURN_SUCCESS;
+    } else {
+      return RETURN_UNSUPPORTED;
+    }
+  } else if (PageAttribute == Page1G) {
+    //
+    // Split 1G to 2M
+    // No need support 1G->4K directly, we should use 1G->2M, then 2M->4K to get more compact page table.
+    //
+    ASSERT (SplitAttribute == Page2M || SplitAttribute == Page4K);
+    if ((SplitAttribute == Page2M || SplitAttribute == Page4K)) {
+      NewPageEntry = AllocateZeroPages (1);
+      DEBUG ((DEBUG_VERBOSE, "Split - 0x%x\n", NewPageEntry));
+      if (NewPageEntry == NULL) {
+        return RETURN_OUT_OF_RESOURCES;
+      }
+      BaseAddress = PageEntry->Uint64 & PAGING_1G_ADDRESS_MASK_64;
+      // Children are 2MB super-pages, so VTD_PG_PS is set on each.
+      for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
+        NewPageEntry[Index] = (BaseAddress + SIZE_2MB * Index) | VTD_PG_PS | (PageEntry->Uint64 & PAGE_PROGATE_BITS);
+      }
+      FlushPageTableMemory (VtdIndex, (UINTN)NewPageEntry, SIZE_4KB);
+
+      PageEntry->Uint64 = (UINT64)(UINTN)NewPageEntry;
+      SetSecondLevelPagingEntryAttribute (PageEntry, EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+      FlushPageTableMemory (VtdIndex, (UINTN)PageEntry, sizeof(*PageEntry));
+      return RETURN_SUCCESS;
+    } else {
+      return RETURN_UNSUPPORTED;
+    }
+  } else {
+    return RETURN_UNSUPPORTED;
+  }
+}
+
+/**
+  Set VTd attribute for a system memory on second level page entry
+
+  @param[in] VtdIndex The index used to identify a VTd engine.
+  @param[in] DomainIdentifier The domain ID of the source.
+                              NOTE(review): currently unused by this function;
+                              kept for interface symmetry with SetPageAttribute.
+  @param[in] SecondLevelPagingEntry The second level paging entry in VTd table for the device.
+  @param[in] BaseAddress The base of device memory address to be used as the DMA memory.
+  @param[in] Length The length of device memory address to be used as the DMA memory.
+  @param[in] IoMmuAccess The IOMMU access.
+
+  @retval EFI_SUCCESS The IoMmuAccess is set for the memory range specified by BaseAddress and Length.
+  @retval EFI_UNSUPPORTED BaseAddress or Length is not 4KB aligned, a page
+                          entry could not be found or created, or splitting
+                          a page entry failed.
+**/
+EFI_STATUS
+SetSecondLevelPagingAttribute (
+  IN UINTN VtdIndex,
+  IN UINT16 DomainIdentifier,
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY *SecondLevelPagingEntry,
+  IN UINT64 BaseAddress,
+  IN UINT64 Length,
+  IN UINT64 IoMmuAccess
+  )
+{
+  VTD_SECOND_LEVEL_PAGING_ENTRY *PageEntry;
+  PAGE_ATTRIBUTE PageAttribute;
+  UINTN PageEntryLength;
+  PAGE_ATTRIBUTE SplitAttribute;
+  EFI_STATUS Status;
+  BOOLEAN IsEntryModified;
+
+  DEBUG ((DEBUG_VERBOSE,"SetSecondLevelPagingAttribute (%d) (0x%016lx - 0x%016lx : %x) \n", VtdIndex, BaseAddress, Length, IoMmuAccess));
+  DEBUG ((DEBUG_VERBOSE," SecondLevelPagingEntry Base - 0x%x\n", SecondLevelPagingEntry));
+
+  // Both base and length must be 4KB aligned.
+  if (BaseAddress != ALIGN_VALUE(BaseAddress, SIZE_4KB)) {
+    DEBUG ((DEBUG_ERROR, "SetSecondLevelPagingAttribute - Invalid Alignment\n"));
+    return EFI_UNSUPPORTED;
+  }
+  if (Length != ALIGN_VALUE(Length, SIZE_4KB)) {
+    DEBUG ((DEBUG_ERROR, "SetSecondLevelPagingAttribute - Invalid Alignment\n"));
+    return EFI_UNSUPPORTED;
+  }
+
+  while (Length != 0) {
+    PageEntry = GetSecondLevelPageTableEntry (VtdIndex, SecondLevelPagingEntry, BaseAddress, mVtdUnitInformation[VtdIndex].Is5LevelPaging, &PageAttribute);
+    if (PageEntry == NULL) {
+      DEBUG ((DEBUG_ERROR, "PageEntry - NULL\n"));
+      return RETURN_UNSUPPORTED;
+    }
+    PageEntryLength = PageAttributeToLength (PageAttribute);
+    SplitAttribute = NeedSplitPage (BaseAddress, Length, PageAttribute);
+    if (SplitAttribute == PageNone) {
+      // Entry fully covered by the request: set the attribute directly.
+      ConvertSecondLevelPageEntryAttribute (VtdIndex, PageEntry, IoMmuAccess, &IsEntryModified);
+      if (IsEntryModified) {
+        mVtdUnitInformation[VtdIndex].HasDirtyPages = TRUE;
+      }
+      //
+      // Convert success, move to next
+      //
+      BaseAddress += PageEntryLength;
+      Length -= PageEntryLength;
+    } else {
+      Status = SplitSecondLevelPage (VtdIndex, PageEntry, PageAttribute, SplitAttribute);
+      if (RETURN_ERROR (Status)) {
+        DEBUG ((DEBUG_ERROR, "SplitSecondLevelPage - %r\n", Status));
+        return RETURN_UNSUPPORTED;
+      }
+      mVtdUnitInformation[VtdIndex].HasDirtyPages = TRUE;
+      //
+      // Just split current page
+      // Convert success in next around
+      //
+    }
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Set VTd attribute for a system memory.
+
+  Thin dispatcher: forwards to SetSecondLevelPagingAttribute when a second
+  level paging table exists for the device.
+
+  @param[in] VtdIndex The index used to identify a VTd engine.
+  @param[in] DomainIdentifier The domain ID of the source.
+  @param[in] SecondLevelPagingEntry The second level paging entry in VTd table for the device.
+  @param[in] BaseAddress The base of device memory address to be used as the DMA memory.
+  @param[in] Length The length of device memory address to be used as the DMA memory.
+  @param[in] IoMmuAccess The IOMMU access.
+
+  @retval EFI_SUCCESS The IoMmuAccess is set for the memory range specified by BaseAddress and Length.
+  @retval EFI_NOT_FOUND SecondLevelPagingEntry is NULL.
+  @retval EFI_UNSUPPORTED The request could not be applied to the paging table.
+**/
+EFI_STATUS
+SetPageAttribute (
+  IN UINTN VtdIndex,
+  IN UINT16 DomainIdentifier,
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY *SecondLevelPagingEntry,
+  IN UINT64 BaseAddress,
+  IN UINT64 Length,
+  IN UINT64 IoMmuAccess
+  )
+{
+  if (SecondLevelPagingEntry == NULL) {
+    return EFI_NOT_FOUND;
+  }
+
+  return SetSecondLevelPagingAttribute (VtdIndex, DomainIdentifier, SecondLevelPagingEntry, BaseAddress, Length, IoMmuAccess);
+}
+
+/**
+  Set VTd attribute for a system memory.
+
+  Locates the VTd engine and (extended) context entry for the device. If the
+  context entry is not yet present, a per-device second level paging table is
+  created and the context entry is programmed and made present. The access
+  attribute is then applied to that table, unless the device shares the
+  all-access "fixed" table, and a global IOTLB invalidation is issued if
+  anything was modified.
+
+  @param[in] Segment The Segment used to identify a VTd engine.
+  @param[in] SourceId The SourceId used to identify a VTd engine and table entry.
+  @param[in] BaseAddress The base of device memory address to be used as the DMA memory.
+  @param[in] Length The length of device memory address to be used as the DMA memory.
+  @param[in] IoMmuAccess The IOMMU access.
+
+  @retval EFI_SUCCESS The IoMmuAccess is set for the memory range specified by BaseAddress and Length.
+  @retval EFI_DEVICE_ERROR The PCI device was not found on any VTd engine.
+  @retval EFI_UNSUPPORTED The request could not be applied to the paging table.
+**/
+EFI_STATUS
+SetAccessAttribute (
+  IN UINT16 Segment,
+  IN VTD_SOURCE_ID SourceId,
+  IN UINT64 BaseAddress,
+  IN UINT64 Length,
+  IN UINT64 IoMmuAccess
+  )
+{
+  UINTN VtdIndex;
+  EFI_STATUS Status;
+  VTD_EXT_CONTEXT_ENTRY *ExtContextEntry;
+  VTD_CONTEXT_ENTRY *ContextEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *SecondLevelPagingEntry;
+  UINT64 Pt;
+  UINTN PciDataIndex;
+  UINT16 DomainIdentifier;
+
+  SecondLevelPagingEntry = NULL;
+
+  DEBUG ((DEBUG_VERBOSE,"SetAccessAttribute (S%04x B%02x D%02x F%02x) (0x%016lx - 0x%08x, %x)\n", Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function, BaseAddress, (UINTN)Length, IoMmuAccess));
+
+  VtdIndex = FindVtdIndexByPciDevice (Segment, SourceId, &ExtContextEntry, &ContextEntry);
+  if (VtdIndex == (UINTN)-1) {
+    DEBUG ((DEBUG_ERROR,"SetAccessAttribute - Pci device (S%04x B%02x D%02x F%02x) not found!\n", Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+    return EFI_DEVICE_ERROR;
+  }
+
+  PciDataIndex = GetPciDataIndex (VtdIndex, Segment, SourceId);
+  mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceData[PciDataIndex].AccessCount++;
+  //
+  // DomainId should not be 0.
+  //
+  DomainIdentifier = (UINT16)(PciDataIndex + 1);
+
+  if (ExtContextEntry != NULL) {
+    if (ExtContextEntry->Bits.Present == 0) {
+      // First access from this device: create its paging table and make the
+      // extended context entry present. Pt holds the 4KB-aligned table PFN.
+      SecondLevelPagingEntry = CreateSecondLevelPagingEntry (VtdIndex, 0, mVtdUnitInformation[VtdIndex].Is5LevelPaging);
+      DEBUG ((DEBUG_VERBOSE,"SecondLevelPagingEntry - 0x%x (S%04x B%02x D%02x F%02x) New\n", SecondLevelPagingEntry, Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+      Pt = (UINT64)RShiftU64 ((UINT64)(UINTN)SecondLevelPagingEntry, 12);
+
+      ExtContextEntry->Bits.SecondLevelPageTranslationPointerLo = (UINT32) Pt;
+      ExtContextEntry->Bits.SecondLevelPageTranslationPointerHi = (UINT32) RShiftU64(Pt, 20);
+      ExtContextEntry->Bits.DomainIdentifier = DomainIdentifier;
+      ExtContextEntry->Bits.Present = 1;
+      FlushPageTableMemory (VtdIndex, (UINTN)ExtContextEntry, sizeof(*ExtContextEntry));
+      VtdLibDumpDmarExtContextEntryTable (NULL, NULL, mVtdUnitInformation[VtdIndex].ExtRootEntryTable, mVtdUnitInformation[VtdIndex].Is5LevelPaging);
+      mVtdUnitInformation[VtdIndex].HasDirtyContext = TRUE;
+    } else {
+      // Context entry already programmed: reuse its paging table.
+      SecondLevelPagingEntry = (VOID *)(UINTN)VTD_64BITS_ADDRESS(ExtContextEntry->Bits.SecondLevelPageTranslationPointerLo, ExtContextEntry->Bits.SecondLevelPageTranslationPointerHi);
+      DEBUG ((DEBUG_VERBOSE,"SecondLevelPagingEntry - 0x%x (S%04x B%02x D%02x F%02x)\n", SecondLevelPagingEntry, Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+    }
+  } else if (ContextEntry != NULL) {
+    if (ContextEntry->Bits.Present == 0) {
+      SecondLevelPagingEntry = CreateSecondLevelPagingEntry (VtdIndex, 0, mVtdUnitInformation[VtdIndex].Is5LevelPaging);
+      DEBUG ((DEBUG_VERBOSE,"SecondLevelPagingEntry - 0x%x (S%04x B%02x D%02x F%02x) New\n", SecondLevelPagingEntry, Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+      Pt = (UINT64)RShiftU64 ((UINT64)(UINTN)SecondLevelPagingEntry, 12);
+
+      ContextEntry->Bits.SecondLevelPageTranslationPointerLo = (UINT32) Pt;
+      ContextEntry->Bits.SecondLevelPageTranslationPointerHi = (UINT32) RShiftU64(Pt, 20);
+      ContextEntry->Bits.DomainIdentifier = DomainIdentifier;
+      ContextEntry->Bits.Present = 1;
+      FlushPageTableMemory (VtdIndex, (UINTN)ContextEntry, sizeof(*ContextEntry));
+      VtdLibDumpDmarContextEntryTable (NULL, NULL, mVtdUnitInformation[VtdIndex].RootEntryTable, mVtdUnitInformation[VtdIndex].Is5LevelPaging);
+      mVtdUnitInformation[VtdIndex].HasDirtyContext = TRUE;
+    } else {
+      SecondLevelPagingEntry = (VOID *)(UINTN)VTD_64BITS_ADDRESS(ContextEntry->Bits.SecondLevelPageTranslationPointerLo, ContextEntry->Bits.SecondLevelPageTranslationPointerHi);
+      DEBUG ((DEBUG_VERBOSE,"SecondLevelPagingEntry - 0x%x (S%04x B%02x D%02x F%02x)\n", SecondLevelPagingEntry, Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+    }
+  }
+
+  //
+  // Do not update FixedSecondLevelPagingEntry
+  //
+  if (SecondLevelPagingEntry != mVtdUnitInformation[VtdIndex].FixedSecondLevelPagingEntry) {
+    Status = SetPageAttribute (
+               VtdIndex,
+               DomainIdentifier,
+               SecondLevelPagingEntry,
+               BaseAddress,
+               Length,
+               IoMmuAccess
+               );
+    if (EFI_ERROR (Status)) {
+      DEBUG ((DEBUG_ERROR,"SetPageAttribute - %r\n", Status));
+      return Status;
+    }
+  }
+
+  // Flush the IOTLB if any context or page entry was changed above.
+  InvalidatePageEntry (VtdIndex);
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Always enable the VTd page attribute for the device.
+
+  A shared "fixed" second level paging table granting read/write access to
+  all system memory is created on first use; the device's (extended) context
+  entry is then pointed at it using the engine's maximum domain ID.
+
+  @param[in] Segment The Segment used to identify a VTd engine.
+  @param[in] SourceId The SourceId used to identify a VTd engine and table entry.
+
+  @retval EFI_SUCCESS The VTd entry is updated to always enable all DMA access for the specific device.
+  @retval EFI_DEVICE_ERROR The PCI device was not found on any VTd engine.
+  @retval EFI_OUT_OF_RESOURCES No resource to create the paging table.
+**/
+EFI_STATUS
+AlwaysEnablePageAttribute (
+  IN UINT16 Segment,
+  IN VTD_SOURCE_ID SourceId
+  )
+{
+  UINTN VtdIndex;
+  VTD_EXT_CONTEXT_ENTRY *ExtContextEntry;
+  VTD_CONTEXT_ENTRY *ContextEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *SecondLevelPagingEntry;
+  UINT64 Pt;
+  UINT32 MaxDomainId;
+
+  DEBUG ((DEBUG_INFO,"AlwaysEnablePageAttribute (S%04x B%02x D%02x F%02x)\n", Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+
+  VtdIndex = FindVtdIndexByPciDevice (Segment, SourceId, &ExtContextEntry, &ContextEntry);
+  if (VtdIndex == (UINTN)-1) {
+    DEBUG ((DEBUG_ERROR,"AlwaysEnablePageAttribute - Pci device (S%04x B%02x D%02x F%02x) not found!\n", Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+    return EFI_DEVICE_ERROR;
+  }
+
+  //
+  // Lazily create the shared all-access paging table.
+  // Compare against NULL - the field is a pointer, not an integer.
+  //
+  if (mVtdUnitInformation[VtdIndex].FixedSecondLevelPagingEntry == NULL) {
+    DEBUG((DEBUG_INFO, "CreateSecondLevelPagingEntry - %d\n", VtdIndex));
+    mVtdUnitInformation[VtdIndex].FixedSecondLevelPagingEntry = CreateSecondLevelPagingEntry (VtdIndex, EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE, mVtdUnitInformation[VtdIndex].Is5LevelPaging);
+    if (mVtdUnitInformation[VtdIndex].FixedSecondLevelPagingEntry == NULL) {
+      //
+      // Fail instead of programming a context entry that points at page 0.
+      //
+      DEBUG ((DEBUG_ERROR,"AlwaysEnablePageAttribute - CreateSecondLevelPagingEntry fail\n"));
+      return EFI_OUT_OF_RESOURCES;
+    }
+  }
+
+  SecondLevelPagingEntry = mVtdUnitInformation[VtdIndex].FixedSecondLevelPagingEntry;
+  Pt = (UINT64)RShiftU64 ((UINT64)(UINTN)SecondLevelPagingEntry, 12);
+  //
+  // Use the engine's maximum domain ID: 2^(ND*2+4) - 1.
+  //
+  MaxDomainId = (UINT32)((1 << (UINT8)((UINTN)mVtdUnitInformation[VtdIndex].CapReg.Bits.ND * 2 + 4)) - 1);
+  if (ExtContextEntry != NULL) {
+    ExtContextEntry->Bits.SecondLevelPageTranslationPointerLo = (UINT32) Pt;
+    ExtContextEntry->Bits.SecondLevelPageTranslationPointerHi = (UINT32) RShiftU64(Pt, 20);
+    ExtContextEntry->Bits.DomainIdentifier = MaxDomainId;
+    ExtContextEntry->Bits.Present = 1;
+    FlushPageTableMemory (VtdIndex, (UINTN)ExtContextEntry, sizeof(*ExtContextEntry));
+  } else if (ContextEntry != NULL) {
+    ContextEntry->Bits.SecondLevelPageTranslationPointerLo = (UINT32) Pt;
+    ContextEntry->Bits.SecondLevelPageTranslationPointerHi = (UINT32) RShiftU64(Pt, 20);
+    ContextEntry->Bits.DomainIdentifier = MaxDomainId;
+    ContextEntry->Bits.Present = 1;
+    FlushPageTableMemory (VtdIndex, (UINTN)ContextEntry, sizeof(*ContextEntry));
+  }
+
+  return EFI_SUCCESS;
+}
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/TranslationTableEx.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/TranslationTableEx.c
new file mode 100644
index 000000000..c07afaf2b
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/TranslationTableEx.c
@@ -0,0 +1,108 @@
+/** @file
+
+ Copyright (c) 2017 - 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "DmaProtection.h"
+
+/**
+  Create extended context entry.
+
+  Allocates one extended root entry table plus one pair of context tables
+  (lower + upper, two consecutive 4KB pages) per discovered bus, then
+  initializes an extended context entry for every PCI device seen on this
+  engine. Also decides 4-level vs 5-level paging from SAGAW capabilities.
+
+  @param[in] VtdIndex The index of the VTd engine.
+
+  @retval EFI_SUCCESS The extended context entry is created.
+  @retval EFI_OUT_OF_RESOURCES No enough resource to create extended context entry.
+  @retval EFI_UNSUPPORTED The engine supports no usable page-table type.
+**/
+EFI_STATUS
+CreateExtContextEntry (
+  IN UINTN VtdIndex
+  )
+{
+  UINTN Index;
+  VOID *Buffer;
+  UINTN RootPages;
+  UINTN ContextPages;
+  VTD_EXT_ROOT_ENTRY *ExtRootEntry;
+  VTD_EXT_CONTEXT_ENTRY *ExtContextEntryTable;
+  VTD_EXT_CONTEXT_ENTRY *ExtContextEntry;
+  VTD_SOURCE_ID *PciSourceId;
+  VTD_SOURCE_ID SourceId;
+  UINTN MaxBusNumber;
+  UINTN EntryTablePages;
+
+  // Highest bus number determines how many context-table pairs are needed.
+  MaxBusNumber = 0;
+  for (Index = 0; Index < mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceDataNumber; Index++) {
+    PciSourceId = &mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceData[Index].PciSourceId;
+    if (PciSourceId->Bits.Bus > MaxBusNumber) {
+      MaxBusNumber = PciSourceId->Bits.Bus;
+    }
+  }
+  DEBUG ((DEBUG_INFO," MaxBusNumber - 0x%x\n", MaxBusNumber));
+
+  // One allocation: root table first, then context tables for buses 0..Max.
+  RootPages = EFI_SIZE_TO_PAGES (sizeof (VTD_EXT_ROOT_ENTRY) * VTD_ROOT_ENTRY_NUMBER);
+  ContextPages = EFI_SIZE_TO_PAGES (sizeof (VTD_EXT_CONTEXT_ENTRY) * VTD_CONTEXT_ENTRY_NUMBER);
+  EntryTablePages = RootPages + ContextPages * (MaxBusNumber + 1);
+  Buffer = AllocateZeroPages (EntryTablePages);
+  if (Buffer == NULL) {
+    DEBUG ((DEBUG_INFO,"Could not Alloc Root Entry Table.. \n"));
+    return EFI_OUT_OF_RESOURCES;
+  }
+  mVtdUnitInformation[VtdIndex].ExtRootEntryTable = (VTD_EXT_ROOT_ENTRY *)Buffer;
+  Buffer = (UINT8 *)Buffer + EFI_PAGES_TO_SIZE (RootPages);
+
+  for (Index = 0; Index < mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceDataNumber; Index++) {
+    PciSourceId = &mVtdUnitInformation[VtdIndex].PciDeviceInfo->PciDeviceData[Index].PciSourceId;
+
+    SourceId.Bits.Bus = PciSourceId->Bits.Bus;
+    SourceId.Bits.Device = PciSourceId->Bits.Device;
+    SourceId.Bits.Function = PciSourceId->Bits.Function;
+
+    ExtRootEntry = &mVtdUnitInformation[VtdIndex].ExtRootEntryTable[SourceId.Index.RootIndex];
+    if (ExtRootEntry->Bits.LowerPresent == 0) {
+      // The upper context table is the 4KB page immediately following the
+      // lower one, hence the "+ 1" on the page frame number.
+      ExtRootEntry->Bits.LowerContextTablePointerLo = (UINT32) RShiftU64 ((UINT64)(UINTN)Buffer, 12);
+      ExtRootEntry->Bits.LowerContextTablePointerHi = (UINT32) RShiftU64 ((UINT64)(UINTN)Buffer, 32);
+      ExtRootEntry->Bits.LowerPresent = 1;
+      ExtRootEntry->Bits.UpperContextTablePointerLo = (UINT32) RShiftU64 ((UINT64)(UINTN)Buffer, 12) + 1;
+      ExtRootEntry->Bits.UpperContextTablePointerHi = (UINT32) RShiftU64 (RShiftU64 ((UINT64)(UINTN)Buffer, 12) + 1, 20);
+      ExtRootEntry->Bits.UpperPresent = 1;
+      Buffer = (UINT8 *)Buffer + EFI_PAGES_TO_SIZE (ContextPages);
+    }
+
+    ExtContextEntryTable = (VTD_EXT_CONTEXT_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(ExtRootEntry->Bits.LowerContextTablePointerLo, ExtRootEntry->Bits.LowerContextTablePointerHi) ;
+    ExtContextEntry = &ExtContextEntryTable[SourceId.Index.ContextIndex];
+    ExtContextEntry->Bits.TranslationType = 0;
+    ExtContextEntry->Bits.FaultProcessingDisable = 0;
+    ExtContextEntry->Bits.Present = 0;
+
+    DEBUG ((DEBUG_INFO,"DOMAIN: S%04x, B%02x D%02x F%02x\n", mVtdUnitInformation[VtdIndex].Segment, SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+
+    // Prefer 5-level paging (SAGAW bit3) but fall back to 4-level (bit2)
+    // when the platform address width fits in 48 bits.
+    mVtdUnitInformation[VtdIndex].Is5LevelPaging = FALSE;
+    if ((mVtdUnitInformation[VtdIndex].CapReg.Bits.SAGAW & BIT3) != 0) {
+      mVtdUnitInformation[VtdIndex].Is5LevelPaging = TRUE;
+      if ((mAcpiDmarTable->HostAddressWidth <= 48) &&
+          ((mVtdUnitInformation[VtdIndex].CapReg.Bits.SAGAW & BIT2) != 0)) {
+        mVtdUnitInformation[VtdIndex].Is5LevelPaging = FALSE;
+      }
+    } else if ((mVtdUnitInformation[VtdIndex].CapReg.Bits.SAGAW & BIT2) == 0) {
+      // NOTE(review): the pages allocated above are not freed on this path.
+      DEBUG((DEBUG_ERROR, "!!!! Page-table type is not supported on VTD %d !!!!\n", VtdIndex));
+      return EFI_UNSUPPORTED;
+    }
+
+    if (mVtdUnitInformation[VtdIndex].Is5LevelPaging) {
+      ExtContextEntry->Bits.AddressWidth = 0x3;
+      DEBUG((DEBUG_INFO, "Using 5-level page-table on VTD %d\n", VtdIndex));
+    } else {
+      ExtContextEntry->Bits.AddressWidth = 0x2;
+      DEBUG((DEBUG_INFO, "Using 4-level page-table on VTD %d\n", VtdIndex));
+    }
+
+
+  }
+
+  FlushPageTableMemory (VtdIndex, (UINTN)mVtdUnitInformation[VtdIndex].ExtRootEntryTable, EFI_PAGES_TO_SIZE(EntryTablePages));
+
+  return EFI_SUCCESS;
+}
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/VtdLog.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/VtdLog.c
new file mode 100644
index 000000000..0ac4758ff
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/VtdLog.c
@@ -0,0 +1,383 @@
+/** @file
+
+ Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "DmaProtection.h"
+
+//
+// Backing store for all VTd log data.  Layout (see VTdLogGetEvents):
+//   [0 .. DxeLogSize)                           DXE event records
+//   [DxeLogSize .. DxeLogSize+PeiPostMemSize)   PEI post-memory event records
+//   then VTD_LOG_PEI_PRE_MEM_BAR_MAX fixed-size pre-memory info records.
+//
+UINT8 *mVtdLogBuffer = NULL;
+
+// Next free byte in the DXE region; NULL until VTdLogInitialize () runs.
+UINT8 *mVtdLogDxeFreeBuffer = NULL;
+// Bytes already consumed in the DXE region.
+UINT32 mVtdLogDxeBufferUsed = 0;
+
+// Bytes of PEI post-memory log copied out of the VTd log HOB.
+UINT32 mVtdLogPeiPostMemBufferUsed = 0;
+
+// Error flags carried over from PEI / accumulated in DXE (VTD_LOG_ERROR_*).
+UINT8 mVtdLogPeiError = 0;
+UINT16 mVtdLogDxeError = 0;
+
+/**
+  Reserve MemorySize bytes from the DXE region of the VTd log buffer.
+
+  When the region is exhausted the VTD_LOG_ERROR_BUFFER_FULL flag is
+  recorded and NULL is returned.
+
+  @param[in] MemorySize  Required memory buffer size.
+
+  @retval Buffer address, or NULL when logging is not set up or full.
+
+**/
+UINT8 *
+EFIAPI
+VTdLogAllocMemory (
+  IN CONST UINT32 MemorySize
+  )
+{
+  UINT8  *Buffer;
+
+  if (mVtdLogDxeFreeBuffer == NULL) {
+    // Logging has not been initialized (or was disabled).
+    return NULL;
+  }
+
+  if ((mVtdLogDxeBufferUsed + MemorySize) > PcdGet32 (PcdVTdDxeLogBufferSize)) {
+    // Out of space - remember that records were dropped.
+    mVtdLogDxeError |= VTD_LOG_ERROR_BUFFER_FULL;
+    return NULL;
+  }
+
+  Buffer = mVtdLogDxeFreeBuffer;
+  mVtdLogDxeFreeBuffer += MemorySize;
+  mVtdLogDxeBufferUsed += MemorySize;
+  return Buffer;
+}
+
+/**
+  Append one two-parameter event record to the DXE VTd log.
+
+  Nothing is recorded when logging is disabled (PcdVTdLogLevel == 0), when
+  the level-1 filter excludes advanced events, or when the log is full.
+
+  @param[in] EventType  Event type
+  @param[in] Data1      First parameter
+  @param[in] Data2      Second parameter
+
+**/
+VOID
+EFIAPI
+VTdLogAddEvent (
+  IN CONST VTDLOG_EVENT_TYPE EventType,
+  IN CONST UINT64            Data1,
+  IN CONST UINT64            Data2
+  )
+{
+  VTDLOG_EVENT_2PARAM  *Record;
+  UINT8                LogLevel;
+
+  LogLevel = PcdGet8 (PcdVTdLogLevel);
+  if ((LogLevel == 0) || ((LogLevel == 1) && (EventType >= VTDLOG_DXE_ADVANCED))) {
+    return;
+  }
+
+  Record = (VTDLOG_EVENT_2PARAM *) VTdLogAllocMemory (sizeof (VTDLOG_EVENT_2PARAM));
+  if (Record == NULL) {
+    return;
+  }
+
+  Record->Header.DataSize  = sizeof (VTDLOG_EVENT_2PARAM);
+  Record->Header.LogType   = (UINT64) 1 << EventType;
+  Record->Header.Timestamp = AsmReadTsc ();
+  Record->Data1            = Data1;
+  Record->Data2            = Data2;
+}
+
+/**
+  Append one event record with an attached variable-size payload to the
+  DXE VTd log.
+
+  Nothing is recorded when logging is disabled (PcdVTdLogLevel == 0), when
+  the level-1 filter excludes advanced events, or when the log is full.
+
+  @param[in] EventType  Event type
+  @param[in] Param      parameter
+  @param[in] Data       Data
+  @param[in] DataSize   Data size
+
+**/
+VOID
+EFIAPI
+VTdLogAddDataEvent (
+  IN CONST VTDLOG_EVENT_TYPE EventType,
+  IN CONST UINT64            Param,
+  IN CONST VOID              *Data,
+  IN CONST UINT32            DataSize
+  )
+{
+  VTDLOG_EVENT_CONTEXT  *Record;
+  UINT32                RecordSize;
+  UINT8                 LogLevel;
+
+  LogLevel = PcdGet8 (PcdVTdLogLevel);
+  if ((LogLevel == 0) || ((LogLevel == 1) && (EventType >= VTDLOG_DXE_ADVANCED))) {
+    return;
+  }
+
+  //
+  // VTDLOG_EVENT_CONTEXT already contains the first payload byte.
+  //
+  RecordSize = sizeof (VTDLOG_EVENT_CONTEXT) + DataSize - 1;
+
+  Record = (VTDLOG_EVENT_CONTEXT *) VTdLogAllocMemory (RecordSize);
+  if (Record == NULL) {
+    return;
+  }
+
+  Record->Header.DataSize  = RecordSize;
+  Record->Header.LogType   = (UINT64) 1 << EventType;
+  Record->Header.Timestamp = AsmReadTsc ();
+  Record->Param            = Param;
+  CopyMem (Record->Data, Data, DataSize);
+}
+
+/**
+  Get Event Items From Pei Pre-Mem Buffer
+
+  Pre-memory PEI records only fixed-size per-BAR info slots, so each used
+  slot is converted here into a synthetic VTDLOG_EVENT_2PARAM record before
+  being handed to the callback.
+
+  @param[in]     InfoBuffer      Pre-Memory data buffer.
+  @param[in]     Context         Event context
+  @param[in out] CallbackHandle  Callback function for each VTd log event
+
+  @return Number of used pre-memory info slots.
+**/
+UINT64
+EFIAPI
+VTdGetEventItemsFromPeiPreMemBuffer (
+  IN VTDLOG_PEI_PRE_MEM_INFO     *InfoBuffer,
+  IN VOID                        *Context,
+  IN OUT EDKII_VTD_LOG_HANDLE_EVENT CallbackHandle
+  )
+{
+  UINTN                Index;
+  UINT64               EventCount;
+  VTDLOG_EVENT_2PARAM  Event;
+
+  if (InfoBuffer == NULL) {
+    return 0;
+  }
+
+  EventCount = 0;
+  for (Index = 0; Index < VTD_LOG_PEI_PRE_MEM_BAR_MAX; Index++) {
+    // Skip slots never written by the pre-memory phase.
+    if (InfoBuffer[Index].Mode == VTD_LOG_PEI_PRE_MEM_NOT_USED) {
+      continue;
+    }
+    if (CallbackHandle) {
+      // Synthesize an event record; no timestamp is available for pre-mem.
+      Event.Header.DataSize = sizeof (VTDLOG_EVENT_2PARAM);
+      Event.Header.Timestamp = 0;
+
+      Event.Header.LogType = ((UINT64) 1) << VTDLOG_PEI_PRE_MEM_DMA_PROTECT;
+      Event.Data1 = InfoBuffer[Index].BarAddress;
+      // Data2 packs Mode in bits [7:0] and Status in bits [15:8].
+      Event.Data2 = InfoBuffer[Index].Mode;
+      Event.Data2 |= InfoBuffer[Index].Status<<8;
+      CallbackHandle (Context, &Event.Header);
+    }
+    EventCount++;
+  }
+
+  return EventCount;
+}
+
+/**
+  Walk a log buffer and hand every complete event record to a callback.
+
+  Records are laid out back-to-back; each starts with a VTDLOG_EVENT_HEADER
+  carrying its total size.  Scanning stops at the first record that would
+  extend past the used portion of the buffer.
+
+  @param[in]     Buffer          Data buffer.
+  @param[in]     BufferUsed      Data buffer used.
+  @param[in]     Context         Event context
+  @param[in out] CallbackHandle  Callback function for each VTd log event
+
+  @return Number of complete records found.
+**/
+UINT64
+EFIAPI
+VTdGetEventItemsFromBuffer (
+  IN     UINT8                    *Buffer,
+  IN     UINT32                   BufferUsed,
+  IN     VOID                     *Context,
+  IN OUT EDKII_VTD_LOG_HANDLE_EVENT CallbackHandle
+  )
+{
+  UINT64               Total;
+  VTDLOG_EVENT_HEADER  *Header;
+
+  Total = 0;
+  if (Buffer == NULL) {
+    return Total;
+  }
+
+  while (BufferUsed > 0) {
+    Header = (VTDLOG_EVENT_HEADER *) Buffer;
+    if (BufferUsed < Header->DataSize) {
+      // Truncated trailing record - stop scanning.
+      break;
+    }
+    if (CallbackHandle) {
+      CallbackHandle (Context, Header);
+    }
+    Buffer     += Header->DataSize;
+    BufferUsed -= Header->DataSize;
+    Total++;
+  }
+
+  return Total;
+}
+
+/**
+  Synthesize a two-parameter state event and feed it directly to the
+  callback (the event is not stored in any log buffer).
+
+  @param[in]     EventType       Event type
+  @param[in]     Data1           First parameter
+  @param[in]     Data2           Second parameter
+  @param[in]     Context         Event context
+  @param[in out] CallbackHandle  Callback function for each VTd log event
+**/
+VOID
+EFIAPI
+VTdGenerateStateEvent (
+  IN VTDLOG_EVENT_TYPE           EventType,
+  IN UINT64                      Data1,
+  IN UINT64                      Data2,
+  IN VOID                        *Context,
+  IN OUT EDKII_VTD_LOG_HANDLE_EVENT CallbackHandle
+  )
+{
+  VTDLOG_EVENT_2PARAM  Event;
+
+  if (CallbackHandle == NULL) {
+    return;
+  }
+
+  Event.Header.DataSize  = sizeof (VTDLOG_EVENT_2PARAM);
+  Event.Header.LogType   = (UINT64) 1 << EventType;
+  Event.Header.Timestamp = 0;
+  Event.Data1            = Data1;
+  Event.Data2            = Data2;
+
+  CallbackHandle (Context, &Event.Header);
+}
+
+/**
+  Get the VTd log events.
+
+  Replays, in order, the PEI pre-memory records, the PEI post-memory events
+  (plus a synthetic PEI error event if any) and the DXE events (plus a
+  synthetic DXE error event if any) to CallbackHandle.
+
+  @param[in]     Context         Event context
+  @param[in out] CallbackHandle  Callback function for each VTd log event
+
+  @retval Total number of events replayed.
+**/
+UINT64
+EFIAPI
+VTdLogGetEvents (
+  IN VOID                        *Context,
+  IN OUT EDKII_VTD_LOG_HANDLE_EVENT CallbackHandle
+  )
+{
+  UINT64 CountPeiPreMem;
+  UINT64 CountPeiPostMem;
+  UINT64 CountDxe;
+  UINT8  *Buffer;
+
+  if (mVtdLogBuffer == NULL) {
+    return 0;
+  }
+
+  //
+  // PEI pre-memory phase: records live after the DXE and PEI post-mem regions.
+  // Use %ld for the UINT64 counts; %d only consumes 32 bits of the argument.
+  //
+  Buffer = &mVtdLogBuffer[PcdGet32 (PcdVTdDxeLogBufferSize) + PcdGet32 (PcdVTdPeiPostMemLogBufferSize)];
+  CountPeiPreMem = VTdGetEventItemsFromPeiPreMemBuffer ((VTDLOG_PEI_PRE_MEM_INFO *) Buffer, Context, CallbackHandle);
+  DEBUG ((DEBUG_INFO, "Find %ld in PEI pre mem phase\n", CountPeiPreMem));
+
+  //
+  // PEI post memory phase
+  //
+  Buffer = &mVtdLogBuffer[PcdGet32 (PcdVTdDxeLogBufferSize)];
+  CountPeiPostMem = VTdGetEventItemsFromBuffer (Buffer, mVtdLogPeiPostMemBufferUsed, Context, CallbackHandle);
+  if (mVtdLogPeiError != 0) {
+    // Surface the accumulated PEI error flags as one synthetic event.
+    VTdGenerateStateEvent (VTDLOG_PEI_BASIC, mVtdLogPeiError, 0, Context, CallbackHandle);
+    CountPeiPostMem++;
+  }
+  DEBUG ((DEBUG_INFO, "Find %ld in PEI post mem phase\n", CountPeiPostMem));
+
+  //
+  // DXE phase
+  //
+  Buffer = &mVtdLogBuffer[0];
+  CountDxe = VTdGetEventItemsFromBuffer (Buffer, mVtdLogDxeBufferUsed, Context, CallbackHandle);
+  if (mVtdLogDxeError != 0) {
+    // Surface the accumulated DXE error flags as one synthetic event.
+    VTdGenerateStateEvent (VTDLOG_DXE_BASIC, mVtdLogDxeError, 0, Context, CallbackHandle);
+    CountDxe++;
+  }
+  DEBUG ((DEBUG_INFO, "Find %ld in DXE phase\n", CountDxe));
+
+  return CountPeiPreMem + CountPeiPostMem + CountDxe;
+}
+
+//
+// EDKII VTd Log protocol instance; installed by VTdLogInitialize ().
+//
+EDKII_VTD_LOG_PROTOCOL mIntelVTdLog = {
+  EDKII_VTD_LOG_PROTOCOL_REVISION,
+  VTdLogGetEvents
+};
+
+/**
+  Initializes the VTd Log.
+
+  Allocates one buffer that holds, in order: the DXE event region, the PEI
+  post-memory event region and the PEI pre-memory info records.  PEI data is
+  copied out of the VTd log HOB when present.  Finally the EDKII VTd Log
+  protocol is installed.  Does nothing when logging is disabled or already
+  initialized.
+
+**/
+VOID
+EFIAPI
+VTdLogInitialize(
+  VOID
+  )
+{
+  UINT32                TotalBufferSize;
+  EFI_STATUS            Status;
+  VOID                  *HobPtr;
+  VTDLOG_PEI_BUFFER_HOB *HobPeiBuffer;
+  EFI_HANDLE            Handle;
+  UINT32                BufferOffset;
+
+  if (PcdGet8 (PcdVTdLogLevel) == 0) {
+    return;
+  }
+
+  if (mVtdLogBuffer != NULL) {
+    // Already initialized.
+    return;
+  }
+
+  TotalBufferSize = PcdGet32 (PcdVTdDxeLogBufferSize) + PcdGet32 (PcdVTdPeiPostMemLogBufferSize) + sizeof (VTDLOG_PEI_PRE_MEM_INFO) * VTD_LOG_PEI_PRE_MEM_BAR_MAX;
+
+  //
+  // AllocatePool () takes VOID **; cast explicitly since mVtdLogBuffer is UINT8 *.
+  //
+  Status = gBS->AllocatePool (EfiBootServicesData, TotalBufferSize, (VOID **) &mVtdLogBuffer);
+  if (EFI_ERROR (Status)) {
+    return;
+  }
+
+  //
+  // DXE Buffer
+  //
+  if (PcdGet32 (PcdVTdDxeLogBufferSize) > 0) {
+    mVtdLogDxeFreeBuffer = mVtdLogBuffer;
+    mVtdLogDxeBufferUsed = 0;
+  }
+
+  //
+  // Get PEI pre-memory buffer offset
+  //
+  BufferOffset = PcdGet32 (PcdVTdDxeLogBufferSize) + PcdGet32 (PcdVTdPeiPostMemLogBufferSize);
+
+  HobPtr = GetFirstGuidHob (&gVTdLogBufferHobGuid);
+  if (HobPtr != NULL) {
+    HobPeiBuffer = GET_GUID_HOB_DATA (HobPtr);
+
+    //
+    // Copy PEI pre-memory phase VTd log.
+    //
+    CopyMem (&mVtdLogBuffer[BufferOffset], &HobPeiBuffer->PreMemInfo, sizeof (VTDLOG_PEI_PRE_MEM_INFO) * VTD_LOG_PEI_PRE_MEM_BAR_MAX);
+
+    //
+    // Copy PEI post-memory phase VTd log.
+    //
+    BufferOffset = PcdGet32 (PcdVTdDxeLogBufferSize);
+    if (PcdGet32 (PcdVTdPeiPostMemLogBufferSize) > 0) {
+      if (HobPeiBuffer->PostMemBufferUsed > 0) {
+        mVtdLogPeiPostMemBufferUsed = HobPeiBuffer->PostMemBufferUsed;
+        CopyMem (&mVtdLogBuffer[BufferOffset], (UINT8 *) (UINTN) HobPeiBuffer->PostMemBuffer, mVtdLogPeiPostMemBufferUsed);
+      }
+    }
+
+    mVtdLogPeiError = HobPeiBuffer->VtdLogPeiError;
+  } else {
+    //
+    // Did not find the PEI VTd log HOB; clear the PEI pre-memory phase buffer.
+    //
+    ZeroMem (&mVtdLogBuffer[BufferOffset], sizeof (VTDLOG_PEI_PRE_MEM_INFO) * VTD_LOG_PEI_PRE_MEM_BAR_MAX);
+  }
+
+  Handle = NULL;
+  Status = gBS->InstallMultipleProtocolInterfaces (
+                  &Handle,
+                  &gEdkiiVTdLogProtocolGuid,
+                  &mIntelVTdLog,
+                  NULL
+                  );
+  ASSERT_EFI_ERROR (Status);
+}
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/VtdReg.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/VtdReg.c
new file mode 100644
index 000000000..dd0c49698
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCoreDxe/VtdReg.c
@@ -0,0 +1,757 @@
+/** @file
+
+ Copyright (c) 2017 - 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "DmaProtection.h"
+
+// Upper bound used to size the FRCD dump area of mVtdRegsInfoBuffer
+// (see PrepareVtdConfig / DumpVtdRegs).
+#define VTD_CAP_REG_NFR_MAX (256)
+
+// Number of VTd remapping engines and their per-engine state.
+UINTN mVtdUnitNumber = 0;
+VTD_UNIT_INFORMATION *mVtdUnitInformation = NULL;
+// Scratch buffer used by DumpVtdRegs (); allocated in PrepareVtdConfig ().
+VTD_REGESTER_INFO *mVtdRegsInfoBuffer = NULL;
+
+// TRUE after EnableDmar () completes; cleared by DisableDmar ().
+BOOLEAN mVtdEnabled;
+
+/**
+  Flush VTD page table and context table memory.
+
+  This action is to make sure the IOMMU engine can get final data in memory.
+
+  @param[in] VtdIndex  The index used to identify a VTd engine.
+  @param[in] Base      The base address of memory to be flushed.
+  @param[in] Size      The size of memory in bytes to be flushed.
+**/
+VOID
+FlushPageTableMemory (
+  IN UINTN VtdIndex,
+  IN UINTN Base,
+  IN UINTN Size
+  )
+{
+  //
+  // When the engine reports page-walk coherency (ECAP.C set) no explicit
+  // cache write-back is needed.
+  //
+  if (mVtdUnitInformation[VtdIndex].ECapReg.Bits.C != 0) {
+    return;
+  }
+
+  WriteBackDataCacheRange ((VOID *)Base, Size);
+}
+
+/**
+  Prepare the cache invalidation interface for one VTd engine.
+
+  Engines reporting major version 5 or lower use the register-based
+  invalidation interface; otherwise the queued invalidation (QI) interface
+  is set up (queue allocated, IQT/IQA programmed) and enabled via GCMD.
+
+  NOTE(review): the function name keeps the historical "Perpare" spelling
+  to match existing callers/headers.
+
+  @param[in] VtdIndex  The index used to identify a VTd engine.
+
+  @retval EFI_SUCCESS           The operation was successful.
+  @retval EFI_UNSUPPORTED       Invalidation method is not supported.
+  @retval EFI_OUT_OF_RESOURCES  A memory allocation failed.
+**/
+EFI_STATUS
+PerpareCacheInvalidationInterface (
+  IN UINTN VtdIndex
+  )
+{
+  UINT32 Reg32;
+  VTD_IQA_REG IqaReg;
+  VTD_UNIT_INFORMATION *VtdUnitInfo;
+  UINTN VtdUnitBaseAddress;
+
+  VtdUnitInfo = &mVtdUnitInformation[VtdIndex];
+  VtdUnitBaseAddress = VtdUnitInfo->VtdUnitBaseAddress;
+
+  // Older engines (VER.Major <= 5) fall back to register-based invalidation.
+  if (VtdUnitInfo->VerReg.Bits.Major <= 5) {
+    VtdUnitInfo->EnableQueuedInvalidation = 0;
+    DEBUG ((DEBUG_INFO, "Use Register-based Invalidation Interface for engine [%d]\n", VtdIndex));
+    return EFI_SUCCESS;
+  }
+
+  if (VtdUnitInfo->ECapReg.Bits.QI == 0) {
+    DEBUG ((DEBUG_ERROR, "Hardware does not support queued invalidations interface for engine [%d]\n", VtdIndex));
+    return EFI_UNSUPPORTED;
+  }
+
+  VtdUnitInfo->EnableQueuedInvalidation = 1;
+  DEBUG ((DEBUG_INFO, "Use Queued Invalidation Interface for engine [%d]\n", VtdIndex));
+
+  //
+  // If QI is already active (e.g. left enabled by an earlier phase),
+  // disable it before reprogramming the queue.
+  //
+  Reg32 = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  if ((Reg32 & B_GSTS_REG_QIES) != 0) {
+    DEBUG ((DEBUG_ERROR,"Queued Invalidation Interface was enabled.\n"));
+
+    VtdLibDisableQueuedInvalidationInterface (VtdUnitBaseAddress);
+  }
+
+  //
+  // Initialize the Invalidation Queue Tail Register to zero.
+  //
+  MmioWrite64 (VtdUnitBaseAddress + R_IQT_REG, 0);
+
+  //
+  // Setup the IQ address, size and descriptor width through the Invalidation Queue Address Register
+  //
+  if (VtdUnitInfo->QiDescBuffer == NULL) {
+    // Queue holds 2^(QS + 7) 256-bit descriptors (QS = VTD_INVALIDATION_QUEUE_SIZE).
+    VtdUnitInfo->QiDescBufferSize = (sizeof (QI_256_DESC) * ((UINTN) 1 << (VTD_INVALIDATION_QUEUE_SIZE + 7)));
+    VtdUnitInfo->QiDescBuffer = AllocatePages (EFI_SIZE_TO_PAGES (VtdUnitInfo->QiDescBufferSize));
+    if (VtdUnitInfo->QiDescBuffer == NULL) {
+      DEBUG ((DEBUG_ERROR,"Could not Alloc Invalidation Queue Buffer.\n"));
+      VTdLogAddEvent (VTDLOG_DXE_QUEUED_INVALIDATION, VTD_LOG_QI_ERROR_OUT_OF_RESOURCES, VtdUnitBaseAddress);
+      return EFI_OUT_OF_RESOURCES;
+    }
+  }
+
+  DEBUG ((DEBUG_INFO, "Invalidation Queue Buffer Size : %d\n", VtdUnitInfo->QiDescBufferSize));
+  //
+  // 4KB Aligned address
+  //
+  IqaReg.Uint64 = (UINT64) (UINTN) VtdUnitInfo->QiDescBuffer;
+  IqaReg.Bits.DW = VTD_QUEUED_INVALIDATION_DESCRIPTOR_WIDTH;
+  IqaReg.Bits.QS = VTD_INVALIDATION_QUEUE_SIZE;
+  MmioWrite64 (VtdUnitBaseAddress + R_IQA_REG, IqaReg.Uint64);
+  IqaReg.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_IQA_REG);
+  DEBUG ((DEBUG_INFO, "IQA_REG = 0x%lx, IQH_REG = 0x%lx\n", IqaReg.Uint64, MmioRead64 (VtdUnitBaseAddress + R_IQH_REG)));
+
+  //
+  // Enable the queued invalidation interface through the Global Command Register.
+  // When enabled, hardware sets the QIES field in the Global Status Register.
+  //
+  Reg32 = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  Reg32 |= B_GMCD_REG_QIE;
+  MmioWrite32 (VtdUnitBaseAddress + R_GCMD_REG, Reg32);
+  DEBUG ((DEBUG_INFO, "Enable Queued Invalidation Interface. GCMD_REG = 0x%x\n", Reg32));
+  // Spin until hardware acknowledges via GSTS.QIES.
+  do {
+    Reg32 = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  } while ((Reg32 & B_GSTS_REG_QIES) == 0);
+
+  VTdLogAddEvent (VTDLOG_DXE_QUEUED_INVALIDATION, VTD_LOG_QI_ENABLE, VtdUnitBaseAddress);
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Submit the queued invalidation descriptor to the remapping
+  hardware unit and wait for its completion.
+
+  On a fault the engine's FSTS/IQERCD registers are captured into the VTd
+  log and the queue-related error bits are cleared so later submissions
+  are not blocked.
+
+  @param[in] VtdUnitBaseAddress  The base address of the VTd engine.
+  @param[in] Desc                The invalidate descriptor
+
+  @retval EFI_SUCCESS            The operation was successful.
+  @retval RETURN_DEVICE_ERROR    A fault is detected.
+  @retval EFI_INVALID_PARAMETER  Parameter is invalid.
+**/
+EFI_STATUS
+SubmitQueuedInvalidationDescriptor (
+  IN UINTN             VtdUnitBaseAddress,
+  IN QI_256_DESC       *Desc
+  )
+{
+  EFI_STATUS           Status;
+  VTD_REGESTER_QI_INFO RegisterQi;
+
+  Status = VtdLibSubmitQueuedInvalidationDescriptor (VtdUnitBaseAddress, Desc, FALSE);
+  if (Status == EFI_DEVICE_ERROR) {
+    RegisterQi.BaseAddress = VtdUnitBaseAddress;
+    RegisterQi.FstsReg     = MmioRead32 (VtdUnitBaseAddress + R_FSTS_REG);
+    RegisterQi.IqercdReg   = MmioRead64 (VtdUnitBaseAddress + R_IQERCD_REG);
+    //
+    // Log under the DXE register event type - this driver runs in DXE
+    // (VTDLOG_PEI_REGISTER in the original was a copy/paste from the PEI
+    // driver; DumpVtdRegs () in this file logs VTDLOG_DXE_REGISTER).
+    //
+    VTdLogAddDataEvent (VTDLOG_DXE_REGISTER, VTDLOG_REGISTER_QI, &RegisterQi, sizeof (VTD_REGESTER_QI_INFO));
+
+    //
+    // Clear the IQE/ITE/ICE error bits so the next submission can proceed.
+    //
+    MmioWrite32 (VtdUnitBaseAddress + R_FSTS_REG, RegisterQi.FstsReg & (B_FSTS_REG_IQE | B_FSTS_REG_ITE | B_FSTS_REG_ICE));
+  }
+
+  return Status;
+}
+
+/**
+  Invalidate VTd context cache.
+
+  Uses the register-based interface (CCMD) when queued invalidation is not
+  enabled for this engine, otherwise submits a context-cache invalidation
+  descriptor with global granularity.
+
+  @param[in] VtdIndex  The index used to identify a VTd engine.
+
+  @retval EFI_SUCCESS       The context cache was invalidated.
+  @retval EFI_DEVICE_ERROR  A previous invalidation was still in flight, or
+                            the queued invalidation submission faulted.
+**/
+EFI_STATUS
+InvalidateContextCache (
+  IN UINTN  VtdIndex
+  )
+{
+  UINT64 Reg64;
+  QI_256_DESC QiDesc;
+
+  if (mVtdUnitInformation[VtdIndex].EnableQueuedInvalidation == 0) {
+    //
+    // Register-based Invalidation
+    //
+    Reg64 = MmioRead64 (mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress + R_CCMD_REG);
+    if ((Reg64 & B_CCMD_REG_ICC) != 0) {
+      // A previous request has not completed; CCMD must not be modified
+      // while ICC is still set.
+      DEBUG ((DEBUG_ERROR,"ERROR: InvalidateContextCache: B_CCMD_REG_ICC is set for VTD(%d)\n",VtdIndex));
+      return EFI_DEVICE_ERROR;
+    }
+
+    // Request a global context-cache invalidation, then poll ICC until
+    // hardware signals completion by clearing it.
+    Reg64 &= ((~B_CCMD_REG_ICC) & (~B_CCMD_REG_CIRG_MASK));
+    Reg64 |= (B_CCMD_REG_ICC | V_CCMD_REG_CIRG_GLOBAL);
+    MmioWrite64 (mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress + R_CCMD_REG, Reg64);
+
+    do {
+      Reg64 = MmioRead64 (mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress + R_CCMD_REG);
+    } while ((Reg64 & B_CCMD_REG_ICC) != 0);
+  } else {
+    //
+    // Queued Invalidation: global context-cache invalidation descriptor.
+    //
+    QiDesc.Uint64[0] = QI_CC_FM(0) | QI_CC_SID(0) | QI_CC_DID(0) | QI_CC_GRAN(1) | QI_CC_TYPE;
+    QiDesc.Uint64[1] = 0;
+    QiDesc.Uint64[2] = 0;
+    QiDesc.Uint64[3] = 0;
+
+    return SubmitQueuedInvalidationDescriptor(mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress, &QiDesc);
+  }
+  return EFI_SUCCESS;
+}
+
+/**
+  Invalidate VTd IOTLB.
+
+  Uses the register-based interface (IOTLB register, located at
+  ECAP.IRO * 16 from the engine base) when queued invalidation is not
+  enabled for this engine, otherwise submits an IOTLB invalidation
+  descriptor with global granularity.
+
+  @param[in] VtdIndex  The index used to identify a VTd engine.
+
+  @retval EFI_SUCCESS       The IOTLB was invalidated.
+  @retval EFI_DEVICE_ERROR  A previous invalidation was still in flight, or
+                            the queued invalidation submission faulted.
+**/
+EFI_STATUS
+InvalidateIOTLB (
+  IN UINTN  VtdIndex
+  )
+{
+  UINT64 Reg64;
+  QI_256_DESC QiDesc;
+
+  if (mVtdUnitInformation[VtdIndex].EnableQueuedInvalidation == 0) {
+    //
+    // Register-based Invalidation
+    //
+    Reg64 = MmioRead64 (mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress + (mVtdUnitInformation[VtdIndex].ECapReg.Bits.IRO * 16) + R_IOTLB_REG);
+    if ((Reg64 & B_IOTLB_REG_IVT) != 0) {
+      // A previous request has not completed; the register must not be
+      // rewritten while IVT is still set.
+      DEBUG ((DEBUG_ERROR,"ERROR: InvalidateIOTLB: B_IOTLB_REG_IVT is set for VTD(%d)\n", VtdIndex));
+      return EFI_DEVICE_ERROR;
+    }
+
+    // Request a global IOTLB invalidation, then poll IVT until hardware
+    // signals completion by clearing it.
+    Reg64 &= ((~B_IOTLB_REG_IVT) & (~B_IOTLB_REG_IIRG_MASK));
+    Reg64 |= (B_IOTLB_REG_IVT | V_IOTLB_REG_IIRG_GLOBAL);
+    MmioWrite64 (mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress + (mVtdUnitInformation[VtdIndex].ECapReg.Bits.IRO * 16) + R_IOTLB_REG, Reg64);
+
+    do {
+      Reg64 = MmioRead64 (mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress + (mVtdUnitInformation[VtdIndex].ECapReg.Bits.IRO * 16) + R_IOTLB_REG);
+    } while ((Reg64 & B_IOTLB_REG_IVT) != 0);
+  } else {
+    //
+    // Queued Invalidation: global IOTLB invalidation descriptor; read/write
+    // drain flags come from the capability register.
+    //
+    QiDesc.Uint64[0] = QI_IOTLB_DID(0) | QI_IOTLB_DR(CAP_READ_DRAIN(mVtdUnitInformation[VtdIndex].CapReg.Uint64)) | QI_IOTLB_DW(CAP_WRITE_DRAIN(mVtdUnitInformation[VtdIndex].CapReg.Uint64)) | QI_IOTLB_GRAN(1) | QI_IOTLB_TYPE;
+    QiDesc.Uint64[1] = QI_IOTLB_ADDR(0) | QI_IOTLB_IH(0) | QI_IOTLB_AM(0);
+    QiDesc.Uint64[2] = 0;
+    QiDesc.Uint64[3] = 0;
+
+    return SubmitQueuedInvalidationDescriptor(mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress, &QiDesc);
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Invalid VTd global IOTLB.
+
+  @param[in] VtdIndex  The index of VTd engine.
+
+  @retval EFI_SUCCESS       VTd global IOTLB is invalidated.
+  @retval EFI_DEVICE_ERROR  VTd global IOTLB is not invalidated.
+**/
+EFI_STATUS
+InvalidateVtdIOTLBGlobal (
+  IN UINTN  VtdIndex
+  )
+{
+  VTD_UNIT_INFORMATION  *UnitInfo;
+
+  if (!mVtdEnabled) {
+    return EFI_SUCCESS;
+  }
+
+  DEBUG((DEBUG_VERBOSE, "InvalidateVtdIOTLBGlobal(%d)\n", VtdIndex));
+
+  UnitInfo = &mVtdUnitInformation[VtdIndex];
+
+  //
+  // Flush the write buffer before issuing any invalidation request.
+  //
+  VtdLibFlushWriteBuffer (UnitInfo->VtdUnitBaseAddress);
+
+  //
+  // Context-cache entries are only stale when a context was modified.
+  //
+  if (UnitInfo->HasDirtyContext) {
+    InvalidateContextCache (VtdIndex);
+  }
+
+  //
+  // IOTLB entries are stale after either context or page-table updates.
+  //
+  if (UnitInfo->HasDirtyContext || UnitInfo->HasDirtyPages) {
+    InvalidateIOTLB (VtdIndex);
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Prepare VTD configuration.
+
+  Reads and dumps the VER/CAP/ECAP registers of every engine, validates
+  page-table type support and the domain count, and prepares the cache
+  invalidation interface for each engine.
+**/
+VOID
+PrepareVtdConfig (
+  VOID
+  )
+{
+  UINTN         Index;
+  UINTN         DomainNumber;
+  EFI_STATUS    Status;
+
+  if (mVtdRegsInfoBuffer == NULL) {
+    // Scratch buffer for DumpVtdRegs (); sized for the maximum FRCD count.
+    mVtdRegsInfoBuffer = AllocateZeroPool (sizeof (VTD_REGESTER_INFO) + sizeof (VTD_UINT128) * VTD_CAP_REG_NFR_MAX);
+    ASSERT (mVtdRegsInfoBuffer != NULL);
+  }
+
+  //
+  // Dump VTd error before DXE phase
+  //
+  DumpVtdIfError ();
+
+  for (Index = 0; Index < mVtdUnitNumber; Index++) {
+    DEBUG ((DEBUG_INFO, "Dump VTd Capability (%d)\n", Index));
+    mVtdUnitInformation[Index].VerReg.Uint32 = MmioRead32 (mVtdUnitInformation[Index].VtdUnitBaseAddress + R_VER_REG);
+    DumpVtdVerRegs (&mVtdUnitInformation[Index].VerReg);
+    mVtdUnitInformation[Index].CapReg.Uint64 = MmioRead64 (mVtdUnitInformation[Index].VtdUnitBaseAddress + R_CAP_REG);
+    DumpVtdCapRegs (&mVtdUnitInformation[Index].CapReg);
+    mVtdUnitInformation[Index].ECapReg.Uint64 = MmioRead64 (mVtdUnitInformation[Index].VtdUnitBaseAddress + R_ECAP_REG);
+    DumpVtdECapRegs (&mVtdUnitInformation[Index].ECapReg);
+
+    if ((mVtdUnitInformation[Index].CapReg.Bits.SLLPS & BIT0) == 0) {
+      DEBUG((DEBUG_WARN, "!!!! 2MB super page is not supported on VTD %d !!!!\n", Index));
+    }
+    if ((mVtdUnitInformation[Index].CapReg.Bits.SAGAW & BIT3) != 0) {
+      DEBUG((DEBUG_INFO, "Support 5-level page-table on VTD %d\n", Index));
+    }
+    if ((mVtdUnitInformation[Index].CapReg.Bits.SAGAW & BIT2) != 0) {
+      DEBUG((DEBUG_INFO, "Support 4-level page-table on VTD %d\n", Index));
+    }
+    if ((mVtdUnitInformation[Index].CapReg.Bits.SAGAW & (BIT3 | BIT2)) == 0) {
+      //
+      // Arguments were swapped in the original DEBUG call: SAGAW is the
+      // page-table type and Index is the engine number.
+      //
+      DEBUG((DEBUG_ERROR, "!!!! Page-table type 0x%X is not supported on VTD %d !!!!\n", mVtdUnitInformation[Index].CapReg.Bits.SAGAW, Index));
+      return ;
+    }
+
+    //
+    // CAP.ND encodes the number of supported domains as 2^(2*ND + 4); every
+    // device behind this engine needs its own domain.
+    //
+    DomainNumber = (UINTN)1 << (UINT8)((UINTN)mVtdUnitInformation[Index].CapReg.Bits.ND * 2 + 4);
+    if (mVtdUnitInformation[Index].PciDeviceInfo->PciDeviceDataNumber >= DomainNumber) {
+      DEBUG((DEBUG_ERROR, "!!!! Pci device Number(0x%x) >= DomainNumber(0x%x) !!!!\n", mVtdUnitInformation[Index].PciDeviceInfo->PciDeviceDataNumber, DomainNumber));
+      return ;
+    }
+
+    Status = PerpareCacheInvalidationInterface(Index);
+    if (EFI_ERROR (Status)) {
+      ASSERT(FALSE);
+      return;
+    }
+  }
+  return ;
+}
+
+/**
+  Disable PMR in all VTd engine.
+**/
+VOID
+DisablePmr (
+  VOID
+  )
+{
+  UINTN       Index;
+  UINTN       BaseAddress;
+  EFI_STATUS  Status;
+
+  DEBUG ((DEBUG_INFO,"DisablePmr\n"));
+
+  //
+  // Turn off the protected memory regions of every engine and record the
+  // per-engine result in the VTd log.
+  //
+  for (Index = 0; Index < mVtdUnitNumber; Index++) {
+    BaseAddress = mVtdUnitInformation[Index].VtdUnitBaseAddress;
+    Status      = VtdLibDisablePmr (BaseAddress);
+    VTdLogAddEvent (VTDLOG_DXE_DISABLE_PMR, BaseAddress, Status);
+  }
+}
+
+/**
+  Update Root Table Address Register
+
+  Programs RTADDR with the (extended) root entry table address, selecting
+  abort-DMA mode in the TTM bits when EnableADM is TRUE.
+
+  @param[in] VtdIndex   The index used to identify a VTd engine.
+  @param[in] EnableADM  TRUE - Enable ADM in TTM bits
+**/
+VOID
+UpdateRootTableAddressRegister (
+  IN UINTN   VtdIndex,
+  IN BOOLEAN EnableADM
+  )
+{
+  UINT64 Reg64;
+
+  if (mVtdUnitInformation[VtdIndex].ExtRootEntryTable != NULL) {
+    DEBUG((DEBUG_INFO, "ExtRootEntryTable 0x%x \n", mVtdUnitInformation[VtdIndex].ExtRootEntryTable));
+    // BIT11 selects the extended root-table format when ADM is not requested
+    // (NOTE(review): presumably the RTADDR translation-table-mode encoding -
+    // confirm against the VT-d specification).
+    Reg64 = (UINT64)(UINTN)mVtdUnitInformation[VtdIndex].ExtRootEntryTable | (EnableADM ? V_RTADDR_REG_TTM_ADM : BIT11);
+  } else {
+    DEBUG((DEBUG_INFO, "RootEntryTable 0x%x \n", mVtdUnitInformation[VtdIndex].RootEntryTable));
+    Reg64 = (UINT64)(UINTN)mVtdUnitInformation[VtdIndex].RootEntryTable | (EnableADM ? V_RTADDR_REG_TTM_ADM : 0);
+  }
+  MmioWrite64 (mVtdUnitInformation[VtdIndex].VtdUnitBaseAddress + R_RTADDR_REG, Reg64);
+}
+
+/**
+  Enable DMAR translation.
+
+  For every engine: program the root table, invalidate the context cache
+  and IOTLB, then set GCMD.TE.  When translation was already enabled and
+  abort-DMA mode is supported/configured, ADM is used as an intermediate
+  state while the new root table is installed.  Finally PMR is disabled.
+
+  @retval EFI_SUCCESS       DMAR translation is enabled.
+  @retval EFI_DEVICE_ERROR  DMAR translation is not enabled.
+**/
+EFI_STATUS
+EnableDmar (
+  VOID
+  )
+{
+  UINTN     Index;
+  UINTN     VtdUnitBaseAddress;
+  BOOLEAN   TEWasEnabled;
+
+  for (Index = 0; Index < mVtdUnitNumber; Index++) {
+    VtdUnitBaseAddress = mVtdUnitInformation[Index].VtdUnitBaseAddress;
+    DEBUG((DEBUG_INFO, ">>>>>>EnableDmar() for engine [%d] BAR [0x%x]\n", Index, VtdUnitBaseAddress));
+
+    //
+    // Check TE was enabled or not.
+    //
+    TEWasEnabled = ((MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG) & B_GSTS_REG_TE) == B_GSTS_REG_TE);
+
+    if (TEWasEnabled && (mVtdUnitInformation[Index].ECapReg.Bits.ADMS == 1) && PcdGetBool (PcdVTdSupportAbortDmaMode)) {
+      //
+      // For implementations reporting Enhanced SRTP Support (ESRTPS) field as
+      // Clear in the Capability register, software must not modify this field while
+      // DMA remapping is active (TES=1 in Global Status register).
+      //
+      if (mVtdUnitInformation[Index].CapReg.Bits.ESRTPS == 0) {
+        VtdLibClearGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_TE);
+      }
+
+      //
+      // Enable ADM
+      //
+      UpdateRootTableAddressRegister (Index, TRUE);
+
+      DEBUG((DEBUG_INFO, "EnableDmar: waiting for RTPS bit to be set... \n"));
+      VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_SRTP);
+
+      // Re-enable translation while in abort-DMA mode so in-flight DMA is
+      // aborted rather than translated with a half-installed root table.
+      DEBUG((DEBUG_INFO, "Enable Abort DMA Mode...\n"));
+      VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_TE);
+
+    } else {
+      UpdateRootTableAddressRegister (Index, FALSE);
+
+      DEBUG((DEBUG_INFO, "EnableDmar: waiting for RTPS bit to be set... \n"));
+      VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_SRTP);
+    }
+
+    //
+    // Write Buffer Flush before invalidation
+    //
+    VtdLibFlushWriteBuffer (VtdUnitBaseAddress);
+
+    //
+    // Invalidate the context cache
+    //
+    InvalidateContextCache (Index);
+
+    //
+    // Invalidate the IOTLB cache
+    //
+    InvalidateIOTLB (Index);
+
+    // Leave abort-DMA mode: point RTADDR at the real root table again.
+    if (TEWasEnabled && (mVtdUnitInformation[Index].ECapReg.Bits.ADMS == 1) && PcdGetBool (PcdVTdSupportAbortDmaMode)) {
+      if (mVtdUnitInformation[Index].CapReg.Bits.ESRTPS == 0) {
+        VtdLibClearGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_TE);
+      }
+
+      UpdateRootTableAddressRegister (Index, FALSE);
+
+      DEBUG((DEBUG_INFO, "EnableDmar: waiting for RTPS bit to be set... \n"));
+      VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_SRTP);
+    }
+
+    //
+    // Enable VTd
+    //
+    DEBUG ((DEBUG_INFO, "EnableDmar: Waiting B_GSTS_REG_TE ...\n"));
+    VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_TE);
+
+    DEBUG ((DEBUG_INFO,"VTD (%d) enabled!<<<<<<\n",Index));
+
+    VTdLogAddEvent (VTDLOG_DXE_ENABLE_DMAR, mVtdUnitInformation[Index].VtdUnitBaseAddress, 0);
+  }
+
+  //
+  // Need disable PMR, since we already setup translation table.
+  //
+  DisablePmr ();
+
+  mVtdEnabled = TRUE;
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Disable DMAR translation.
+
+  Disables translation and the queued invalidation interface on every
+  engine, frees the per-engine invalidation queue, then reports the access
+  counters of all devices behind each engine.
+
+  @retval EFI_SUCCESS       DMAR translation is disabled.
+  @retval EFI_DEVICE_ERROR  DMAR translation is not disabled.
+**/
+EFI_STATUS
+DisableDmar (
+  VOID
+  )
+{
+  UINTN                 Index;
+  UINTN                 SubIndex;
+  VTD_UNIT_INFORMATION  *VtdUnitInfo;
+
+  for (Index = 0; Index < mVtdUnitNumber; Index++) {
+    VtdUnitInfo = &mVtdUnitInformation[Index];
+
+    VtdLibDisableDmar (VtdUnitInfo->VtdUnitBaseAddress);
+    VTdLogAddEvent (VTDLOG_DXE_DISABLE_DMAR, VtdUnitInfo->VtdUnitBaseAddress, 0);
+
+    if (VtdUnitInfo->EnableQueuedInvalidation != 0) {
+      //
+      // Disable queued invalidation interface.
+      //
+      VtdLibDisableQueuedInvalidationInterface (VtdUnitInfo->VtdUnitBaseAddress);
+      VTdLogAddEvent (VTDLOG_DXE_QUEUED_INVALIDATION, VTD_LOG_QI_DISABLE, VtdUnitInfo->VtdUnitBaseAddress);
+
+      //
+      // Free descriptor queue memory
+      //
+      if (VtdUnitInfo->QiDescBuffer != NULL) {
+        FreePages(VtdUnitInfo->QiDescBuffer, EFI_SIZE_TO_PAGES (VtdUnitInfo->QiDescBufferSize));
+        VtdUnitInfo->QiDescBuffer = NULL;
+        VtdUnitInfo->QiDescBufferSize = 0;
+      }
+
+      VtdUnitInfo->EnableQueuedInvalidation = 0;
+    }
+  }
+
+  mVtdEnabled = FALSE;
+
+  for (Index = 0; Index < mVtdUnitNumber; Index++) {
+    VtdUnitInfo = &mVtdUnitInformation[Index];
+    DEBUG((DEBUG_INFO, "engine [%d] access\n", Index));
+    //
+    // Index each device entry by the inner loop variable; the original used
+    // the outer engine index here and printed the wrong device entries.
+    //
+    for (SubIndex = 0; SubIndex < VtdUnitInfo->PciDeviceInfo->PciDeviceDataNumber; SubIndex++) {
+      DEBUG ((DEBUG_INFO, " PCI S%04X B%02x D%02x F%02x - %d\n",
+        VtdUnitInfo->Segment,
+        VtdUnitInfo->PciDeviceInfo->PciDeviceData[SubIndex].PciSourceId.Bits.Bus,
+        VtdUnitInfo->PciDeviceInfo->PciDeviceData[SubIndex].PciSourceId.Bits.Device,
+        VtdUnitInfo->PciDeviceInfo->PciDeviceData[SubIndex].PciSourceId.Bits.Function,
+        VtdUnitInfo->PciDeviceInfo->PciDeviceData[SubIndex].AccessCount
+        ));
+    }
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Dump VTd version registers.
+
+  Prints the raw value and the major/minor fields of the VER register.
+
+  @param[in] VerReg  The version register.
+**/
+VOID
+DumpVtdVerRegs (
+  IN VTD_VER_REG *VerReg
+  )
+{
+  DEBUG ((DEBUG_INFO, " VerReg - 0x%x\n", VerReg->Uint32));
+  DEBUG ((DEBUG_INFO, " Major - 0x%x\n", VerReg->Bits.Major));
+  DEBUG ((DEBUG_INFO, " Minor - 0x%x\n", VerReg->Bits.Minor));
+}
+
+/**
+  Dump VTd capability registers.
+
+  Prints the raw CAP value followed by every bit-field, one per line.
+
+  @param[in] CapReg  The capability register.
+**/
+VOID
+DumpVtdCapRegs (
+  IN VTD_CAP_REG *CapReg
+  )
+{
+  DEBUG((DEBUG_INFO, " CapReg - 0x%x\n", CapReg->Uint64));
+  DEBUG((DEBUG_INFO, " ND - 0x%x\n", CapReg->Bits.ND));
+  DEBUG((DEBUG_INFO, " AFL - 0x%x\n", CapReg->Bits.AFL));
+  DEBUG((DEBUG_INFO, " RWBF - 0x%x\n", CapReg->Bits.RWBF));
+  DEBUG((DEBUG_INFO, " PLMR - 0x%x\n", CapReg->Bits.PLMR));
+  DEBUG((DEBUG_INFO, " PHMR - 0x%x\n", CapReg->Bits.PHMR));
+  DEBUG((DEBUG_INFO, " CM - 0x%x\n", CapReg->Bits.CM));
+  DEBUG((DEBUG_INFO, " SAGAW - 0x%x\n", CapReg->Bits.SAGAW));
+  DEBUG((DEBUG_INFO, " MGAW - 0x%x\n", CapReg->Bits.MGAW));
+  DEBUG((DEBUG_INFO, " ZLR - 0x%x\n", CapReg->Bits.ZLR));
+  DEBUG((DEBUG_INFO, " FRO - 0x%x\n", CapReg->Bits.FRO));
+  DEBUG((DEBUG_INFO, " SLLPS - 0x%x\n", CapReg->Bits.SLLPS));
+  DEBUG((DEBUG_INFO, " PSI - 0x%x\n", CapReg->Bits.PSI));
+  DEBUG((DEBUG_INFO, " NFR - 0x%x\n", CapReg->Bits.NFR));
+  DEBUG((DEBUG_INFO, " MAMV - 0x%x\n", CapReg->Bits.MAMV));
+  DEBUG((DEBUG_INFO, " DWD - 0x%x\n", CapReg->Bits.DWD));
+  DEBUG((DEBUG_INFO, " DRD - 0x%x\n", CapReg->Bits.DRD));
+  DEBUG((DEBUG_INFO, " FL1GP - 0x%x\n", CapReg->Bits.FL1GP));
+  DEBUG((DEBUG_INFO, " PI - 0x%x\n", CapReg->Bits.PI));
+}
+
+/**
+  Dump VTd extended capability registers.
+
+  Prints the raw ECAP value followed by every bit-field, one per line.
+
+  @param[in] ECapReg  The extended capability register.
+**/
+VOID
+DumpVtdECapRegs (
+  IN VTD_ECAP_REG *ECapReg
+  )
+{
+  DEBUG((DEBUG_INFO, " ECapReg - 0x%lx\n", ECapReg->Uint64));
+  DEBUG((DEBUG_INFO, " C - 0x%x\n", ECapReg->Bits.C));
+  DEBUG((DEBUG_INFO, " QI - 0x%x\n", ECapReg->Bits.QI));
+  DEBUG((DEBUG_INFO, " DT - 0x%x\n", ECapReg->Bits.DT));
+  DEBUG((DEBUG_INFO, " IR - 0x%x\n", ECapReg->Bits.IR));
+  DEBUG((DEBUG_INFO, " EIM - 0x%x\n", ECapReg->Bits.EIM));
+  DEBUG((DEBUG_INFO, " PT - 0x%x\n", ECapReg->Bits.PT));
+  DEBUG((DEBUG_INFO, " SC - 0x%x\n", ECapReg->Bits.SC));
+  DEBUG((DEBUG_INFO, " IRO - 0x%x\n", ECapReg->Bits.IRO));
+  DEBUG((DEBUG_INFO, " MHMV - 0x%x\n", ECapReg->Bits.MHMV));
+  DEBUG((DEBUG_INFO, " MTS - 0x%x\n", ECapReg->Bits.MTS));
+  DEBUG((DEBUG_INFO, " NEST - 0x%x\n", ECapReg->Bits.NEST));
+  DEBUG((DEBUG_INFO, " PASID - 0x%x\n", ECapReg->Bits.PASID));
+  DEBUG((DEBUG_INFO, " PRS - 0x%x\n", ECapReg->Bits.PRS));
+  DEBUG((DEBUG_INFO, " ERS - 0x%x\n", ECapReg->Bits.ERS));
+  DEBUG((DEBUG_INFO, " SRS - 0x%x\n", ECapReg->Bits.SRS));
+  DEBUG((DEBUG_INFO, " NWFS - 0x%x\n", ECapReg->Bits.NWFS));
+  DEBUG((DEBUG_INFO, " EAFS - 0x%x\n", ECapReg->Bits.EAFS));
+  DEBUG((DEBUG_INFO, " PSS - 0x%x\n", ECapReg->Bits.PSS));
+  DEBUG((DEBUG_INFO, " SMTS - 0x%x\n", ECapReg->Bits.SMTS));
+  DEBUG((DEBUG_INFO, " ADMS - 0x%x\n", ECapReg->Bits.ADMS));
+  DEBUG((DEBUG_INFO, " PDS - 0x%x\n", ECapReg->Bits.PDS));
+}
+
+/**
+  Dump VTd registers.
+
+  Snapshots the engine's register file into the shared mVtdRegsInfoBuffer
+  scratch buffer, prints it, and records it as a VTd log data event.
+
+  @param[in] VtdUnitBaseAddress  The base address of the VTd engine.
+**/
+VOID
+DumpVtdRegs (
+  IN UINTN  VtdUnitBaseAddress
+  )
+{
+  VTD_REGESTER_INFO *VtdRegInfo;
+  VTD_ECAP_REG      ECapReg;
+  VTD_CAP_REG       CapReg;
+
+  if (mVtdRegsInfoBuffer == NULL) {
+    // PrepareVtdConfig () has not allocated the scratch buffer yet.
+    return;
+  }
+
+  VtdRegInfo = mVtdRegsInfoBuffer;
+  VtdRegInfo->BaseAddress = VtdUnitBaseAddress;
+  VtdRegInfo->VerReg    = MmioRead32 (VtdUnitBaseAddress + R_VER_REG);
+  VtdRegInfo->CapReg    = MmioRead64 (VtdUnitBaseAddress + R_CAP_REG);
+  VtdRegInfo->EcapReg   = MmioRead64 (VtdUnitBaseAddress + R_ECAP_REG);
+  VtdRegInfo->GstsReg   = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  VtdRegInfo->RtaddrReg = MmioRead64 (VtdUnitBaseAddress + R_RTADDR_REG);
+  VtdRegInfo->CcmdReg   = MmioRead64 (VtdUnitBaseAddress + R_CCMD_REG);
+  VtdRegInfo->FstsReg   = MmioRead32 (VtdUnitBaseAddress + R_FSTS_REG);
+  VtdRegInfo->FectlReg  = MmioRead32 (VtdUnitBaseAddress + R_FECTL_REG);
+  VtdRegInfo->FedataReg = MmioRead32 (VtdUnitBaseAddress + R_FEDATA_REG);
+  VtdRegInfo->FeaddrReg = MmioRead32 (VtdUnitBaseAddress + R_FEADDR_REG);
+  VtdRegInfo->FeuaddrReg = MmioRead32 (VtdUnitBaseAddress + R_FEUADDR_REG);
+  VtdRegInfo->IqercdReg = MmioRead64 (VtdUnitBaseAddress + R_IQERCD_REG);
+
+  //
+  // CAP.NFR + 1 fault recording registers, each 128 bits, located at
+  // CAP.FRO * 16 from the engine base.
+  //
+  CapReg.Uint64 = VtdRegInfo->CapReg;
+  for (VtdRegInfo->FrcdRegNum = 0; VtdRegInfo->FrcdRegNum < (UINT16) CapReg.Bits.NFR + 1; VtdRegInfo->FrcdRegNum++) {
+    VtdRegInfo->FrcdReg[VtdRegInfo->FrcdRegNum].Uint64Lo = MmioRead64 (VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (VtdRegInfo->FrcdRegNum * 16) + R_FRCD_REG));
+    VtdRegInfo->FrcdReg[VtdRegInfo->FrcdRegNum].Uint64Hi = MmioRead64 (VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (VtdRegInfo->FrcdRegNum * 16) + R_FRCD_REG + sizeof(UINT64)));
+  }
+
+  // IVA/IOTLB registers are located at ECAP.IRO * 16 from the engine base.
+  ECapReg.Uint64 = VtdRegInfo->EcapReg;
+  VtdRegInfo->IvaReg = MmioRead64 (VtdUnitBaseAddress + (ECapReg.Bits.IRO * 16) + R_IVA_REG);
+  VtdRegInfo->IotlbReg = MmioRead64 (VtdUnitBaseAddress + (ECapReg.Bits.IRO * 16) + R_IOTLB_REG);
+
+  DEBUG((DEBUG_INFO, "#### DumpVtdRegs(0x%016lx) Begin ####\n", VtdUnitBaseAddress));
+
+  VtdLibDumpVtdRegsAll (NULL, NULL, VtdRegInfo);
+
+  DEBUG((DEBUG_INFO, "#### DumpVtdRegs(0x%016lx) End ####\n", VtdUnitBaseAddress));
+
+  // Log only the FRCD entries actually captured (FrcdRegNum of them).
+  VTdLogAddDataEvent (VTDLOG_DXE_REGISTER, VTDLOG_REGISTER_ALL, (VOID *) VtdRegInfo, sizeof (VTD_REGESTER_INFO) + sizeof (VTD_UINT128) * (VtdRegInfo->FrcdRegNum - 1));
+}
+
+/**
+  Dump VTd registers for all VTd engine.
+**/
+VOID
+DumpVtdRegsAll (
+  VOID
+  )
+{
+  UINTN Index;
+
+  // Walk every remapping engine and dump its full register file.
+  for (Index = 0; Index < mVtdUnitNumber; Index++) {
+    DumpVtdRegs (mVtdUnitInformation[Index].VtdUnitBaseAddress);
+  }
+}
+
+/**
+  Dump VTd registers if there is error.
+
+  Scans every engine for pending fault status (FSTS != 0), a masked fault
+  event interrupt (FECTL.IM) or a recorded fault (FRCD.F).  On error it
+  reports a status code, dumps the engine registers and clears the fault
+  recording/status registers.
+**/
+VOID
+DumpVtdIfError (
+  VOID
+  )
+{
+  UINTN         Num;
+  UINTN         Index;
+  VTD_FRCD_REG  FrcdReg;
+  VTD_CAP_REG   CapReg;
+  UINT32        Reg32;
+  BOOLEAN       HasError;
+  UINTN         VtdUnitBaseAddress;
+
+  for (Num = 0; Num < mVtdUnitNumber; Num++) {
+    VtdUnitBaseAddress = mVtdUnitInformation[Num].VtdUnitBaseAddress;
+
+    HasError = FALSE;
+    Reg32 = MmioRead32 (VtdUnitBaseAddress + R_FSTS_REG);
+    if (Reg32 != 0) {
+      HasError = TRUE;
+    }
+    //
+    // BIT30 is the Interrupt Mask (IM) bit of FECTL.
+    //
+    Reg32 = MmioRead32 (VtdUnitBaseAddress + R_FECTL_REG);
+    if ((Reg32 & BIT30) != 0) {
+      HasError = TRUE;
+    }
+
+    //
+    // CAP.NFR + 1 fault recording registers at CAP.FRO * 16 from the base.
+    //
+    CapReg.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_CAP_REG);
+    for (Index = 0; Index < (UINTN)CapReg.Bits.NFR + 1; Index++) {
+      FrcdReg.Uint64[0] = MmioRead64 (VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (Index * 16) + R_FRCD_REG));
+      FrcdReg.Uint64[1] = MmioRead64 (VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (Index * 16) + R_FRCD_REG + sizeof(UINT64)));
+      if (FrcdReg.Bits.F != 0) {
+        HasError = TRUE;
+      }
+    }
+
+    if (HasError) {
+      REPORT_STATUS_CODE (EFI_ERROR_CODE, PcdGet32 (PcdErrorCodeVTdError));
+      DEBUG((DEBUG_INFO, "\n#### ERROR ####\n"));
+      //
+      // DumpVtdRegs () takes the engine MMIO base address, not the index
+      // (the original passed Num here).
+      //
+      DumpVtdRegs (VtdUnitBaseAddress);
+      DEBUG((DEBUG_INFO, "#### ERROR ####\n\n"));
+      //
+      // Clear
+      //
+      for (Index = 0; Index < (UINTN)CapReg.Bits.NFR + 1; Index++) {
+        FrcdReg.Uint64[1] = MmioRead64 (VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (Index * 16) + R_FRCD_REG + sizeof(UINT64)));
+        if (FrcdReg.Bits.F != 0) {
+          //
+          // Software writes the value read from this field (F) to Clear it.
+          //
+          MmioWrite64 (VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (Index * 16) + R_FRCD_REG + sizeof(UINT64)), FrcdReg.Uint64[1]);
+        }
+      }
+      // FSTS bits are write-1-to-clear: write back what was read.
+      MmioWrite32 (VtdUnitBaseAddress + R_FSTS_REG, MmioRead32 (VtdUnitBaseAddress + R_FSTS_REG));
+    }
+  }
+}
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/DmarTable.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/DmarTable.c
new file mode 100644
index 000000000..91c89de47
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/DmarTable.c
@@ -0,0 +1,63 @@
+/** @file
+
+ Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Uefi.h>
+#include <PiPei.h>
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/DebugLib.h>
+#include <Library/HobLib.h>
+#include <Library/PciSegmentLib.h>
+#include <IndustryStandard/Vtd.h>
+#include <IndustryStandard/Pci.h>
+#include <Protocol/IoMmu.h>
+#include <Ppi/VtdInfo.h>
+#include <Guid/VtdLogDataHob.h>
+#include "IntelVTdCorePei.h"
+
+/**
+ Parse DMAR DRHD table.
+
+ Walks every remapping structure in the DMAR ACPI table and invokes the
+ callback (when supplied) once per DRHD structure found.
+
+ @param[in] AcpiDmarTable DMAR ACPI table
+ @param[in] Callback Callback function invoked for each DRHD (may be NULL to only count)
+ @param[in] Context Callback function Context
+
+ @return the VTd engine number.
+
+**/
+UINTN
+ParseDmarAcpiTableDrhd (
+ IN EFI_ACPI_DMAR_HEADER *AcpiDmarTable,
+ IN PROCESS_DRHD_CALLBACK_FUNC Callback,
+ IN VOID *Context
+ )
+{
+ EFI_ACPI_DMAR_STRUCTURE_HEADER *DmarHeader;
+ UINTN VtdIndex;
+
+ VtdIndex = 0;
+ //
+ // Remapping sub-structures start immediately after the fixed DMAR header.
+ //
+ DmarHeader = (EFI_ACPI_DMAR_STRUCTURE_HEADER *) ((UINTN) (AcpiDmarTable + 1));
+
+ while ((UINTN) DmarHeader < (UINTN) AcpiDmarTable + AcpiDmarTable->Header.Length) {
+ switch (DmarHeader->Type) {
+ case EFI_ACPI_DMAR_TYPE_DRHD:
+ //
+ // Count every DRHD; the callback is optional.
+ //
+ if (Callback != NULL) {
+ Callback (Context, VtdIndex, (EFI_ACPI_DMAR_DRHD_HEADER *) DmarHeader);
+ }
+ VtdIndex++;
+ break;
+ default:
+ //
+ // Ignore all non-DRHD remapping structures (RMRR, ATSR, ...).
+ //
+ break;
+ }
+ DmarHeader = (EFI_ACPI_DMAR_STRUCTURE_HEADER *) ((UINTN) DmarHeader + DmarHeader->Length);
+ }
+
+ return VtdIndex;
+}
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.c
new file mode 100644
index 000000000..0160c3604
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.c
@@ -0,0 +1,1099 @@
+/** @file
+
+ Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Uefi.h>
+#include <PiPei.h>
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/IoLib.h>
+#include <Library/DebugLib.h>
+#include <Library/PeiServicesLib.h>
+#include <Library/HobLib.h>
+#include <IndustryStandard/Vtd.h>
+#include <Ppi/IoMmu.h>
+#include <Ppi/VtdInfo.h>
+#include <Ppi/MemoryDiscovered.h>
+#include <Ppi/EndOfPeiPhase.h>
+#include <Guid/VtdPmrInfoHob.h>
+#include <Guid/VtdLogDataHob.h>
+#include "IntelVTdCorePei.h"
+
+//
+// Maximum number of VTd engines (DRHD entries) this driver can track.
+//
+#define VTD_UNIT_MAX 64
+
+//
+// Module-private GUID identifying the HOB that carries the VTd engine
+// context (VTD_INFO) across PEI phases.
+//
+EFI_GUID mVTdInfoGuid = {
+ 0x222f5e30, 0x5cd, 0x49c6, { 0x8a, 0xc, 0x36, 0xd6, 0x58, 0x41, 0xe0, 0x82 }
+};
+
+//
+// Module-private GUID identifying the HOB that carries the DMA buffer
+// bookkeeping (DMA_BUFFER_INFO).
+//
+EFI_GUID mDmaBufferInfoGuid = {
+ 0x7b624ec7, 0xfb67, 0x4f9c, { 0xb6, 0xb0, 0x4d, 0xfa, 0x9c, 0x88, 0x20, 0x39 }
+};
+
+#define MAP_INFO_SIGNATURE SIGNATURE_32 ('D', 'M', 'A', 'P')
+//
+// Bookkeeping record for one Map() operation; stored in the DMA buffer
+// immediately after the mapped data (see PeiIoMmuMap/PeiIoMmuUnmap).
+//
+typedef struct {
+ UINT32 Signature;
+ EDKII_IOMMU_OPERATION Operation;
+ UINTN NumberOfBytes;
+ EFI_PHYSICAL_ADDRESS HostAddress;
+ EFI_PHYSICAL_ADDRESS DeviceAddress;
+} MAP_INFO;
+
+/**
+ Allocate memory buffer for VTd log events.
+
+ Serves allocations out of the post-memory log buffer only; in the
+ pre-memory phase (PostMemBuffer == 0) or when the buffer is exhausted
+ this intentionally returns NULL and the event is dropped.
+
+ @param[in] MemorySize Required memory buffer size.
+
+ @retval Buffer address, or NULL if no log buffer space is available.
+
+**/
+UINT8 *
+EFIAPI
+VTdLogAllocMemory (
+ IN CONST UINT32 MemorySize
+ )
+{
+ VOID *HobPtr;
+ VTDLOG_PEI_BUFFER_HOB *BufferHob;
+ UINT8 *ReturnBuffer;
+
+ ReturnBuffer = NULL;
+ HobPtr = GetFirstGuidHob (&gVTdLogBufferHobGuid);
+ if (HobPtr != NULL) {
+ BufferHob = GET_GUID_HOB_DATA (HobPtr);
+
+ if (BufferHob->PostMemBuffer != 0) {
+ //
+ // Post-memory phase
+ //
+ if ((BufferHob->PostMemBufferUsed + MemorySize) < PcdGet32 (PcdVTdPeiPostMemLogBufferSize)) {
+ ReturnBuffer = &((UINT8 *) (UINTN) BufferHob->PostMemBuffer)[BufferHob->PostMemBufferUsed];
+ BufferHob->PostMemBufferUsed += MemorySize;
+ } else {
+ //
+ // Remember that at least one event was lost due to a full buffer.
+ //
+ BufferHob->VtdLogPeiError |= VTD_LOG_ERROR_BUFFER_FULL;
+ }
+ }
+ }
+
+ return ReturnBuffer;
+}
+
+/**
+  Add the VTd log event in post memory phase.
+
+  The event is dropped silently when logging is disabled, filtered by the
+  log level, or when the log buffer is full.
+
+  @param[in] EventType   Event type
+  @param[in] Data1       First parameter
+  @param[in] Data2       Second parameter
+
+**/
+VOID
+EFIAPI
+VTdLogAddEvent (
+  IN CONST VTDLOG_EVENT_TYPE EventType,
+  IN CONST UINT64            Data1,
+  IN CONST UINT64            Data2
+  )
+{
+  VTDLOG_EVENT_2PARAM *Item;
+
+  //
+  // Log-level filter: 0 = disabled, 1 = basic events only, 2 = all events.
+  //
+  if (PcdGet8 (PcdVTdLogLevel) == 0) {
+    return;
+  } else if ((PcdGet8 (PcdVTdLogLevel) == 1) && (EventType >= VTDLOG_PEI_ADVANCED)) {
+    return;
+  }
+
+  Item = (VTDLOG_EVENT_2PARAM *) VTdLogAllocMemory (sizeof (VTDLOG_EVENT_2PARAM));
+  if (Item != NULL) {
+    Item->Data1 = Data1;
+    Item->Data2 = Data2;
+    Item->Header.DataSize = sizeof (VTDLOG_EVENT_2PARAM);
+    //
+    // LogType is a 64-bit bit-mask. Use LShiftU64() rather than
+    // (UINT64)(1 << EventType): the latter shifts the 32-bit literal 1
+    // before widening, which is undefined / truncating for EventType >= 31.
+    //
+    Item->Header.LogType = LShiftU64 (1, EventType);
+    Item->Header.Timestamp = AsmReadTsc ();
+  }
+}
+
+/**
+  Add a new VTd log event with data.
+
+  The event is dropped silently when logging is disabled, filtered by the
+  log level, or when the log buffer is full.
+
+  @param[in] EventType   Event type
+  @param[in] Param       parameter
+  @param[in] Data        Data
+  @param[in] DataSize    Data size
+
+**/
+VOID
+EFIAPI
+VTdLogAddDataEvent (
+  IN CONST VTDLOG_EVENT_TYPE EventType,
+  IN CONST UINT64            Param,
+  IN CONST VOID              *Data,
+  IN CONST UINT32            DataSize
+  )
+{
+  VTDLOG_EVENT_CONTEXT *Item;
+  UINT32               EventSize;
+
+  //
+  // Log-level filter: 0 = disabled, 1 = basic events only, 2 = all events.
+  //
+  if (PcdGet8 (PcdVTdLogLevel) == 0) {
+    return;
+  } else if ((PcdGet8 (PcdVTdLogLevel) == 1) && (EventType >= VTDLOG_PEI_ADVANCED)) {
+    return;
+  }
+
+  //
+  // VTDLOG_EVENT_CONTEXT already contains the first data byte, hence "- 1".
+  //
+  EventSize = sizeof (VTDLOG_EVENT_CONTEXT) + DataSize - 1;
+
+  Item = (VTDLOG_EVENT_CONTEXT *) VTdLogAllocMemory (EventSize);
+  if (Item != NULL) {
+    Item->Param = Param;
+    CopyMem (Item->Data, Data, DataSize);
+
+    Item->Header.DataSize = EventSize;
+    //
+    // LogType is a 64-bit bit-mask. Use LShiftU64() rather than
+    // (UINT64)(1 << EventType): the latter shifts the 32-bit literal 1
+    // before widening, which is undefined / truncating for EventType >= 31.
+    //
+    Item->Header.LogType = LShiftU64 (1, EventType);
+    Item->Header.Timestamp = AsmReadTsc ();
+  }
+}
+/**
+ Add the VTd log event in pre-memory phase.
+
+ Pre-memory events are recorded in a fixed-size array inside the log HOB
+ (one slot per VTd engine); if all slots are taken the event is dropped.
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+ @param[in] Mode Pre-memory DMA protection mode.
+ @param[in] Status Status
+
+**/
+VOID
+EFIAPI
+VTdLogAddPreMemoryEvent (
+ IN UINTN VtdUnitBaseAddress,
+ IN UINT8 Mode,
+ IN UINT8 Status
+ )
+{
+ VTDLOG_PEI_BUFFER_HOB *BufferHob;
+ VOID *HobPtr;
+ UINT8 Index;
+
+ HobPtr = GetFirstGuidHob (&gVTdLogBufferHobGuid);
+ if (HobPtr != NULL) {
+ BufferHob = GET_GUID_HOB_DATA (HobPtr);
+
+ for (Index = 0; Index < VTD_LOG_PEI_PRE_MEM_BAR_MAX; Index++) {
+ if (BufferHob->PreMemInfo[Index].Mode == VTD_LOG_PEI_PRE_MEM_NOT_USED) {
+ //
+ // Found a free position
+ //
+ BufferHob->PreMemInfo[Index].BarAddress = (UINT32) VtdUnitBaseAddress;
+ BufferHob->PreMemInfo[Index].Mode = Mode;
+ BufferHob->PreMemInfo[Index].Status = Status;
+ break;
+ }
+ }
+ }
+}
+
+/**
+ Initializes the VTd Log.
+
+ Creates the log HOB on first call; on the post-memory call additionally
+ allocates the post-memory event buffer (sized by PcdVTdPeiPostMemLogBufferSize).
+ Safe to call more than once.
+
+ @param[in] MemoryInitialized TRUE: It is post-memory phase
+ FALSE: It is pre-memory phase
+**/
+VOID
+EFIAPI
+VTdLogInitialize(
+ BOOLEAN MemoryInitialized
+ )
+{
+ VTDLOG_PEI_BUFFER_HOB *BufferHob;
+ VOID *HobPtr;
+
+ if (PcdGet8 (PcdVTdLogLevel) > 0) {
+ HobPtr = GetFirstGuidHob (&gVTdLogBufferHobGuid);
+ if (HobPtr == NULL) {
+ BufferHob = BuildGuidHob (&gVTdLogBufferHobGuid, sizeof (VTDLOG_PEI_BUFFER_HOB));
+ ASSERT (BufferHob != NULL);
+
+ ZeroMem (BufferHob, sizeof (VTDLOG_PEI_BUFFER_HOB));
+ } else {
+ BufferHob = GET_GUID_HOB_DATA (HobPtr);
+ }
+
+ if (MemoryInitialized) {
+ if ((BufferHob->PostMemBuffer == 0) && (PcdGet32 (PcdVTdPeiPostMemLogBufferSize) > 0)) {
+ BufferHob->PostMemBufferUsed = 0;
+ //
+ // Alignment of sizeof (UINT8) == 1, i.e. no extra alignment constraint.
+ // On allocation failure PostMemBuffer stays 0 and logging is skipped.
+ //
+ BufferHob->PostMemBuffer = (UINTN) AllocateAlignedPages (EFI_SIZE_TO_PAGES (PcdGet32 (PcdVTdPeiPostMemLogBufferSize)), sizeof (UINT8));
+ }
+ }
+ }
+}
+
+/**
+ Set IOMMU attribute for a system memory.
+
+ If the IOMMU PPI exists, the system memory cannot be used
+ for DMA by default.
+
+ When a device requests a DMA access for a system memory,
+ the device driver need use SetAttribute() to update the IOMMU
+ attribute to request DMA access (read and/or write).
+
+ @param[in] This The PPI instance pointer.
+ @param[in] DeviceHandle The device who initiates the DMA access request.
+ @param[in] Mapping The mapping value returned from Map().
+ @param[in] IoMmuAccess The IOMMU access.
+
+ @retval EFI_SUCCESS The IoMmuAccess is set for the memory range specified by DeviceAddress and Length.
+ @retval EFI_INVALID_PARAMETER Mapping is not a value that was returned by Map().
+ @retval EFI_INVALID_PARAMETER IoMmuAccess specified an illegal combination of access.
+ @retval EFI_UNSUPPORTED The bit mask of IoMmuAccess is not supported by the IOMMU.
+ @retval EFI_UNSUPPORTED The IOMMU does not support the memory range specified by Mapping.
+ @retval EFI_OUT_OF_RESOURCES There are not enough resources available to modify the IOMMU access.
+ @retval EFI_DEVICE_ERROR The IOMMU device reported an error while attempting the operation.
+ @retval EFI_NOT_AVAILABLE_YET DMA protection has been enabled, but DMA buffer are
+ not available to be allocated yet.
+**/
+EFI_STATUS
+EFIAPI
+PeiIoMmuSetAttribute (
+ IN EDKII_IOMMU_PPI *This,
+ IN VOID *Mapping,
+ IN UINT64 IoMmuAccess
+ )
+{
+ VOID *Hob;
+ DMA_BUFFER_INFO *DmaBufferInfo;
+
+ //
+ // check and clear VTd error
+ //
+ DumpVtdIfError ();
+
+ DEBUG ((DEBUG_INFO, "PeiIoMmuSetAttribute:\n"));
+
+ Hob = GetFirstGuidHob (&mDmaBufferInfoGuid);
+ DmaBufferInfo = GET_GUID_HOB_DATA(Hob);
+
+ if (DmaBufferInfo->DmaBufferCurrentTop == 0) {
+ DEBUG ((DEBUG_INFO, "PeiIoMmuSetAttribute: DmaBufferCurrentTop == 0\n"));
+ return EFI_NOT_AVAILABLE_YET;
+ }
+
+ //
+ // NOTE(review): Mapping and IoMmuAccess are not consulted here; this PEI
+ // implementation appears to grant access as long as the DMA buffer exists
+ // (all mapped ranges live inside the open DMA buffer) — confirm intended.
+ //
+ return EFI_SUCCESS;
+}
+
+/**
+ Provides the controller-specific addresses required to access system memory from a
+ DMA bus master.
+
+ Mappings are carved from the DMA buffer with a simple bump allocator that
+ grows upward from DmaBufferCurrentBottom; the MAP_INFO record is stored
+ directly after the mapped data.
+
+ @param [in] This The PPI instance pointer.
+ @param [in] Operation Indicates if the bus master is going to read or write to system memory.
+ @param [in] HostAddress The system memory address to map to the PCI controller.
+ @param [in] [out] NumberOfBytes On input the number of bytes to map. On output the number of bytes
+ that were mapped.
+ @param [out] DeviceAddress The resulting map address for the bus master PCI controller to use to
+ access the hosts HostAddress.
+ @param [out] Mapping A resulting value to pass to Unmap().
+
+ @retval EFI_SUCCESS The range was mapped for the returned NumberOfBytes.
+ @retval EFI_UNSUPPORTED The HostAddress cannot be mapped as a common buffer.
+ @retval EFI_INVALID_PARAMETER One or more parameters are invalid.
+ @retval EFI_OUT_OF_RESOURCES The request could not be completed due to a lack of resources.
+ @retval EFI_DEVICE_ERROR The system hardware could not map the requested address.
+ @retval EFI_NOT_AVAILABLE_YET DMA protection has been enabled, but DMA buffer are
+ not available to be allocated yet.
+**/
+EFI_STATUS
+EFIAPI
+PeiIoMmuMap (
+ IN EDKII_IOMMU_PPI *This,
+ IN EDKII_IOMMU_OPERATION Operation,
+ IN VOID *HostAddress,
+ IN OUT UINTN *NumberOfBytes,
+ OUT EFI_PHYSICAL_ADDRESS *DeviceAddress,
+ OUT VOID **Mapping
+ )
+{
+ MAP_INFO *MapInfo;
+ UINTN Length;
+ VOID *Hob;
+ DMA_BUFFER_INFO *DmaBufferInfo;
+
+ Hob = GetFirstGuidHob (&mDmaBufferInfoGuid);
+ DmaBufferInfo = GET_GUID_HOB_DATA(Hob);
+
+ DEBUG ((DEBUG_INFO, "PeiIoMmuMap - HostAddress - 0x%x, NumberOfBytes - %x\n", HostAddress, *NumberOfBytes));
+ DEBUG ((DEBUG_INFO, " DmaBufferCurrentTop - %x\n", DmaBufferInfo->DmaBufferCurrentTop));
+ DEBUG ((DEBUG_INFO, " DmaBufferCurrentBottom - %x\n", DmaBufferInfo->DmaBufferCurrentBottom));
+ DEBUG ((DEBUG_INFO, " Operation - %x\n", Operation));
+
+ if (DmaBufferInfo->DmaBufferCurrentTop == 0) {
+ return EFI_NOT_AVAILABLE_YET;
+ }
+
+ //
+ // Common buffers were already allocated inside the DMA buffer by
+ // AllocateBuffer(), so they map 1:1 with no bookkeeping record.
+ //
+ if (Operation == EdkiiIoMmuOperationBusMasterCommonBuffer ||
+ Operation == EdkiiIoMmuOperationBusMasterCommonBuffer64) {
+ *DeviceAddress = (UINTN) HostAddress;
+ *Mapping = NULL;
+ return EFI_SUCCESS;
+ }
+
+ //
+ // Reserve room for the data plus the trailing MAP_INFO record.
+ //
+ Length = *NumberOfBytes + sizeof (MAP_INFO);
+ if (Length > DmaBufferInfo->DmaBufferCurrentTop - DmaBufferInfo->DmaBufferCurrentBottom) {
+ DEBUG ((DEBUG_ERROR, "PeiIoMmuMap - OUT_OF_RESOURCE\n"));
+ VTdLogAddEvent (VTDLOG_PEI_VTD_ERROR, VTD_LOG_PEI_VTD_ERROR_PPI_MAP, Length);
+ ASSERT (FALSE);
+ return EFI_OUT_OF_RESOURCES;
+ }
+
+ *DeviceAddress = DmaBufferInfo->DmaBufferCurrentBottom;
+ DmaBufferInfo->DmaBufferCurrentBottom += Length;
+
+ //
+ // MAP_INFO lives immediately after the mapped bytes.
+ //
+ MapInfo = (VOID *) (UINTN) (*DeviceAddress + *NumberOfBytes);
+ MapInfo->Signature = MAP_INFO_SIGNATURE;
+ MapInfo->Operation = Operation;
+ MapInfo->NumberOfBytes = *NumberOfBytes;
+ MapInfo->HostAddress = (UINTN) HostAddress;
+ MapInfo->DeviceAddress = *DeviceAddress;
+ *Mapping = MapInfo;
+ DEBUG ((DEBUG_INFO, " Op(%x):DeviceAddress - %x, Mapping - %x\n", Operation, (UINTN) *DeviceAddress, MapInfo));
+
+ //
+ // If this is a read operation from the Bus Master's point of view,
+ // then copy the contents of the real buffer into the mapped buffer
+ // so the Bus Master can read the contents of the real buffer.
+ //
+ if (Operation == EdkiiIoMmuOperationBusMasterRead ||
+ Operation == EdkiiIoMmuOperationBusMasterRead64) {
+ CopyMem (
+ (VOID *) (UINTN) MapInfo->DeviceAddress,
+ (VOID *) (UINTN) MapInfo->HostAddress,
+ MapInfo->NumberOfBytes
+ );
+ }
+
+ VTdLogAddEvent (VTDLOG_PEI_PPI_MAP, (UINT64) HostAddress, Length);
+ return EFI_SUCCESS;
+}
+
<doc_update>
+/**
+ Completes the Map() operation and releases any corresponding resources.
+
+ The bump allocator can only reclaim space when the unmapped region is the
+ most recent mapping (LIFO order); otherwise the space is simply leaked
+ until the buffer bottom catches up.
+
+ @param [in] This The PPI instance pointer.
+ @param [in] Mapping The mapping value returned from Map().
+
+ @retval EFI_SUCCESS The range was unmapped.
+ @retval EFI_INVALID_PARAMETER Mapping is not a value that was returned by Map().
+ @retval EFI_DEVICE_ERROR The data was not committed to the target system memory.
+ @retval EFI_NOT_AVAILABLE_YET DMA protection has been enabled, but DMA buffer are
+ not available to be allocated yet.
+**/
+EFI_STATUS
+EFIAPI
+PeiIoMmuUnmap (
+ IN EDKII_IOMMU_PPI *This,
+ IN VOID *Mapping
+ )
+{
+ MAP_INFO *MapInfo;
+ UINTN Length;
+ VOID *Hob;
+ DMA_BUFFER_INFO *DmaBufferInfo;
+
+ Hob = GetFirstGuidHob (&mDmaBufferInfoGuid);
+ DmaBufferInfo = GET_GUID_HOB_DATA(Hob);
+
+ DEBUG ((DEBUG_INFO, "PeiIoMmuUnmap - Mapping - %x\n", Mapping));
+ DEBUG ((DEBUG_INFO, " DmaBufferCurrentTop - %x\n", DmaBufferInfo->DmaBufferCurrentTop));
+ DEBUG ((DEBUG_INFO, " DmaBufferCurrentBottom - %x\n", DmaBufferInfo->DmaBufferCurrentBottom));
+
+ if (DmaBufferInfo->DmaBufferCurrentTop == 0) {
+ return EFI_NOT_AVAILABLE_YET;
+ }
+
+ //
+ // A NULL Mapping is the common-buffer case produced by Map().
+ //
+ if (Mapping == NULL) {
+ return EFI_SUCCESS;
+ }
+
+ MapInfo = Mapping;
+ ASSERT (MapInfo->Signature == MAP_INFO_SIGNATURE);
+ DEBUG ((DEBUG_INFO, " Op(%x):DeviceAddress - %x, NumberOfBytes - %x\n", MapInfo->Operation, (UINTN) MapInfo->DeviceAddress, MapInfo->NumberOfBytes));
+
+ //
+ // If this is a write operation from the Bus Master's point of view,
+ // then copy the contents of the mapped buffer into the real buffer
+ // so the processor can read the contents of the real buffer.
+ //
+ if (MapInfo->Operation == EdkiiIoMmuOperationBusMasterWrite ||
+ MapInfo->Operation == EdkiiIoMmuOperationBusMasterWrite64) {
+ CopyMem (
+ (VOID *) (UINTN) MapInfo->HostAddress,
+ (VOID *) (UINTN) MapInfo->DeviceAddress,
+ MapInfo->NumberOfBytes
+ );
+ }
+
+ //
+ // Reclaim the space only if this mapping sits at the allocator's bottom.
+ //
+ Length = MapInfo->NumberOfBytes + sizeof (MAP_INFO);
+ if (DmaBufferInfo->DmaBufferCurrentBottom == MapInfo->DeviceAddress + Length) {
+ DmaBufferInfo->DmaBufferCurrentBottom -= Length;
+ }
+
+ return EFI_SUCCESS;
+}
+
+/**
+ Allocates pages that are suitable for an OperationBusMasterCommonBuffer or
+ OperationBusMasterCommonBuffer64 mapping.
+
+ Pages are carved from the top of the DMA buffer downward (Map() grows from
+ the bottom upward).
+
+ @param [in] This The PPI instance pointer.
+ @param [in] MemoryType The type of memory to allocate, EfiBootServicesData or
+ EfiRuntimeServicesData.
+ @param [in] Pages The number of pages to allocate.
+ @param [in] [out] HostAddress A pointer to store the base system memory address of the
+ allocated range.
+ @param [in] Attributes The requested bit mask of attributes for the allocated range.
+
+ @retval EFI_SUCCESS The requested memory pages were allocated.
+ @retval EFI_UNSUPPORTED Attributes is unsupported. The only legal attribute bits are
+ MEMORY_WRITE_COMBINE, MEMORY_CACHED and DUAL_ADDRESS_CYCLE.
+ @retval EFI_INVALID_PARAMETER One or more parameters are invalid.
+ @retval EFI_OUT_OF_RESOURCES The memory pages could not be allocated.
+ @retval EFI_NOT_AVAILABLE_YET DMA protection has been enabled, but DMA buffer are
+ not available to be allocated yet.
+**/
+EFI_STATUS
+EFIAPI
+PeiIoMmuAllocateBuffer (
+ IN EDKII_IOMMU_PPI *This,
+ IN EFI_MEMORY_TYPE MemoryType,
+ IN UINTN Pages,
+ IN OUT VOID **HostAddress,
+ IN UINT64 Attributes
+ )
+{
+ UINTN Length;
+ VOID *Hob;
+ DMA_BUFFER_INFO *DmaBufferInfo;
+
+ Hob = GetFirstGuidHob (&mDmaBufferInfoGuid);
+ DmaBufferInfo = GET_GUID_HOB_DATA(Hob);
+
+ DEBUG ((DEBUG_INFO, "PeiIoMmuAllocateBuffer - page - %x\n", Pages));
+ DEBUG ((DEBUG_INFO, " DmaBufferCurrentTop - %x\n", DmaBufferInfo->DmaBufferCurrentTop));
+ DEBUG ((DEBUG_INFO, " DmaBufferCurrentBottom - %x\n", DmaBufferInfo->DmaBufferCurrentBottom));
+
+ if (DmaBufferInfo->DmaBufferCurrentTop == 0) {
+ return EFI_NOT_AVAILABLE_YET;
+ }
+
+ //
+ // NOTE(review): MemoryType and Attributes are not validated here; all
+ // allocations come from the single pre-reserved DMA buffer.
+ //
+ Length = EFI_PAGES_TO_SIZE (Pages);
+ if (Length > DmaBufferInfo->DmaBufferCurrentTop - DmaBufferInfo->DmaBufferCurrentBottom) {
+ DEBUG ((DEBUG_ERROR, "PeiIoMmuAllocateBuffer - OUT_OF_RESOURCE\n"));
+ VTdLogAddEvent (VTDLOG_PEI_VTD_ERROR, VTD_LOG_PEI_VTD_ERROR_PPI_ALLOC, Length);
+ ASSERT (FALSE);
+ return EFI_OUT_OF_RESOURCES;
+ }
+ *HostAddress = (VOID *) (UINTN) (DmaBufferInfo->DmaBufferCurrentTop - Length);
+ DmaBufferInfo->DmaBufferCurrentTop -= Length;
+
+ DEBUG ((DEBUG_INFO, "PeiIoMmuAllocateBuffer - allocate - %x\n", *HostAddress));
+
+ VTdLogAddEvent (VTDLOG_PEI_PPI_ALLOC_BUFFER, (UINT64) (*HostAddress), Length);
+
+ return EFI_SUCCESS;
+}
+
+/**
+ Frees memory that was allocated with AllocateBuffer().
+
+ Space is reclaimed only when the freed range is the most recent
+ allocation (it sits exactly at the current top); out-of-order frees are
+ leaked until the top catches up.
+
+ @param [in] This The PPI instance pointer.
+ @param [in] Pages The number of pages to free.
+ @param [in] HostAddress The base system memory address of the allocated range.
+
+ @retval EFI_SUCCESS The requested memory pages were freed.
+ @retval EFI_INVALID_PARAMETER The memory range specified by HostAddress and Pages
+ was not allocated with AllocateBuffer().
+ @retval EFI_NOT_AVAILABLE_YET DMA protection has been enabled, but DMA buffer are
+ not available to be allocated yet.
+**/
+EFI_STATUS
+EFIAPI
+PeiIoMmuFreeBuffer (
+ IN EDKII_IOMMU_PPI *This,
+ IN UINTN Pages,
+ IN VOID *HostAddress
+ )
+{
+ UINTN Length;
+ VOID *Hob;
+ DMA_BUFFER_INFO *DmaBufferInfo;
+
+ Hob = GetFirstGuidHob (&mDmaBufferInfoGuid);
+ DmaBufferInfo = GET_GUID_HOB_DATA (Hob);
+
+ DEBUG ((DEBUG_INFO, "PeiIoMmuFreeBuffer - page - %x, HostAddr - %x\n", Pages, HostAddress));
+ DEBUG ((DEBUG_INFO, " DmaBufferCurrentTop - %x\n", DmaBufferInfo->DmaBufferCurrentTop));
+ DEBUG ((DEBUG_INFO, " DmaBufferCurrentBottom - %x\n", DmaBufferInfo->DmaBufferCurrentBottom));
+
+ if (DmaBufferInfo->DmaBufferCurrentTop == 0) {
+ return EFI_NOT_AVAILABLE_YET;
+ }
+
+ Length = EFI_PAGES_TO_SIZE (Pages);
+ if ((UINTN)HostAddress == DmaBufferInfo->DmaBufferCurrentTop) {
+ DmaBufferInfo->DmaBufferCurrentTop += Length;
+ }
+
+ return EFI_SUCCESS;
+}
+
+//
+// IOMMU PPI instance exposed by this driver.
+//
+EDKII_IOMMU_PPI mIoMmuPpi = {
+ EDKII_IOMMU_PPI_REVISION,
+ PeiIoMmuSetAttribute,
+ PeiIoMmuMap,
+ PeiIoMmuUnmap,
+ PeiIoMmuAllocateBuffer,
+ PeiIoMmuFreeBuffer,
+};
+
+//
+// Descriptor used to (re)install gEdkiiIoMmuPpiGuid.
+//
+CONST EFI_PEI_PPI_DESCRIPTOR mIoMmuPpiList = {
+ EFI_PEI_PPI_DESCRIPTOR_PPI | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST,
+ &gEdkiiIoMmuPpiGuid,
+ (VOID *) &mIoMmuPpi
+};
+
+/**
+ Get ACPI DMAR Table from EdkiiVTdInfo PPI
+
+ @retval Address ACPI DMAR Table address
+ @retval NULL Failed to get ACPI DMAR Table
+**/
+EFI_ACPI_DMAR_HEADER * GetAcpiDmarTable (
+ VOID
+ )
+{
+ EFI_STATUS Status;
+ EFI_ACPI_DMAR_HEADER *AcpiDmarTable;
+
+ //
+ // Get the DMAR table
+ //
+ Status = PeiServicesLocatePpi (
+ &gEdkiiVTdInfoPpiGuid,
+ 0,
+ NULL,
+ (VOID **)&AcpiDmarTable
+ );
+ if (EFI_ERROR (Status)) {
+ DEBUG ((DEBUG_ERROR, "Fail to get ACPI DMAR Table : %r\n", Status));
+ AcpiDmarTable = NULL;
+ } else {
+ //
+ // Dump every DRHD entry to the debug log for diagnosis.
+ //
+ VtdLibDumpAcpiDmarDrhd (NULL, NULL, AcpiDmarTable);
+ }
+
+ return AcpiDmarTable;
+}
+
+/**
+  Get the VTd engine context information hob.
+
+  Creates a zeroed VTD_INFO HOB on the first call; later calls return the
+  same HOB's data.
+
+  @retval The VTd engine context information, or NULL if the HOB could not
+          be created.
+
+**/
+VTD_INFO *
+GetVTdInfoHob (
+  VOID
+  )
+{
+  VOID      *GuidHob;
+  VTD_INFO  *VTdInfo;
+
+  GuidHob = GetFirstGuidHob (&mVTdInfoGuid);
+  if (GuidHob != NULL) {
+    return GET_GUID_HOB_DATA (GuidHob);
+  }
+
+  //
+  // First caller: create the context HOB and zero it.
+  //
+  VTdInfo = BuildGuidHob (&mVTdInfoGuid, sizeof (VTD_INFO));
+  if (VTdInfo != NULL) {
+    ZeroMem (VTdInfo, sizeof (VTD_INFO));
+  }
+
+  return VTdInfo;
+}
+
+/**
+ Callback function of parse DMAR DRHD table in pre-memory phase.
+
+ Puts the engine into block-all-DMA mode so that no bus master can touch
+ system memory before the DMA buffer exists.
+
+ NOTE(review): "Dhrd" looks like a typo for "Drhd" (compare
+ ProcessDrhdPostMemory); renaming would also touch the caller.
+
+ @param [in] [out] Context Callback function context.
+ @param [in] VTdIndex The VTd engine index.
+ @param [in] DmarDrhd The DRHD table.
+
+**/
+VOID
+ProcessDhrdPreMemory (
+ IN OUT VOID *Context,
+ IN UINTN VTdIndex,
+ IN EFI_ACPI_DMAR_DRHD_HEADER *DmarDrhd
+ )
+{
+ DEBUG ((DEBUG_INFO,"VTD (%d) BaseAddress - 0x%016lx\n", VTdIndex, DmarDrhd->RegisterBaseAddress));
+
+ EnableVTdTranslationProtectionBlockDma ((UINTN) DmarDrhd->RegisterBaseAddress);
+}
+
+/**
+ Callback function of parse DMAR DRHD table in post memory phase.
+
+ Records the DRHD engine into the VTD_UNIT_INFO table (Context), skipping
+ duplicates so the routine is safe to run across repeated DMAR parses.
+
+ @param [in] [out] Context Callback function context (VTD_UNIT_INFO array of VTD_UNIT_MAX entries).
+ @param [in] VTdIndex The VTd engine index.
+ @param [in] DmarDrhd The DRHD table.
+
+**/
+VOID
+ProcessDrhdPostMemory (
+ IN OUT VOID *Context,
+ IN UINTN VTdIndex,
+ IN EFI_ACPI_DMAR_DRHD_HEADER *DmarDrhd
+ )
+{
+ VTD_UNIT_INFO *VtdUnitInfo;
+ UINTN Index;
+
+ VtdUnitInfo = (VTD_UNIT_INFO *) Context;
+
+ if (DmarDrhd->RegisterBaseAddress == 0) {
+ DEBUG ((DEBUG_INFO,"VTd Base Address is 0\n"));
+ ASSERT (FALSE);
+ return;
+ }
+
+ //
+ // Skip engines already recorded from a previous parse.
+ //
+ for (Index = 0; Index < VTD_UNIT_MAX; Index++) {
+ if (VtdUnitInfo[Index].VtdUnitBaseAddress == DmarDrhd->RegisterBaseAddress) {
+ DEBUG ((DEBUG_INFO,"Find VTD (%d) [0x%08x] Exist\n", VTdIndex, DmarDrhd->RegisterBaseAddress));
+ return;
+ }
+ }
+
+ //
+ // Fill the first free slot (base address 0 marks a free entry).
+ // VTdIndex is reused as the slot index from here on.
+ //
+ for (VTdIndex = 0; VTdIndex < VTD_UNIT_MAX; VTdIndex++) {
+ if (VtdUnitInfo[VTdIndex].VtdUnitBaseAddress == 0) {
+ VtdUnitInfo[VTdIndex].VtdUnitBaseAddress = (UINTN) DmarDrhd->RegisterBaseAddress;
+ VtdUnitInfo[VTdIndex].Segment = DmarDrhd->SegmentNumber;
+ VtdUnitInfo[VTdIndex].Flags = DmarDrhd->Flags;
+ VtdUnitInfo[VTdIndex].Done = FALSE;
+
+ DEBUG ((DEBUG_INFO,"VTD (%d) BaseAddress - 0x%016lx\n", VTdIndex, DmarDrhd->RegisterBaseAddress));
+ DEBUG ((DEBUG_INFO," Segment - %d, Flags - 0x%x\n", DmarDrhd->SegmentNumber, DmarDrhd->Flags));
+ return;
+ }
+ }
+
+ DEBUG ((DEBUG_INFO,"VtdUnitInfo Table is full\n"));
+ ASSERT (FALSE);
+ return;
+}
+
+/**
+ Initializes the Intel VTd Info in post memory phase.
+
+ @retval EFI_SUCCESS The VTd info is successfully initialized.
+ @retval EFI_OUT_OF_RESOURCES Can't allocate the VTd unit info table.
+ @retval EFI_UNSUPPORTED No DRHD found, or host address width mismatch.
+**/
+EFI_STATUS
+InitVTdInfo (
+ VOID
+ )
+{
+ VTD_INFO *VTdInfo;
+ EFI_ACPI_DMAR_HEADER *AcpiDmarTable;
+ UINTN VtdUnitNumber;
+ VTD_UNIT_INFO *VtdUnitInfo;
+
+ VTdInfo = GetVTdInfoHob ();
+ ASSERT (VTdInfo != NULL);
+
+ AcpiDmarTable = GetAcpiDmarTable ();
+ ASSERT (AcpiDmarTable != NULL);
+
+ if (VTdInfo->VtdUnitInfo == NULL) {
+ //
+ // Generate a new Vtd Unit Info Table
+ //
+ VTdInfo->VtdUnitInfo = AllocateZeroPages (EFI_SIZE_TO_PAGES (sizeof (VTD_UNIT_INFO) * VTD_UNIT_MAX));
+ if (VTdInfo->VtdUnitInfo == NULL) {
+ DEBUG ((DEBUG_ERROR, "InitVTdInfo - OUT_OF_RESOURCE\n"));
+ ASSERT (FALSE);
+ return EFI_OUT_OF_RESOURCES;
+ }
+ }
+ VtdUnitInfo = VTdInfo->VtdUnitInfo;
+
+ //
+ // First call adopts the table's host address width; later calls must agree.
+ //
+ if (VTdInfo->HostAddressWidth == 0) {
+ VTdInfo->HostAddressWidth = AcpiDmarTable->HostAddressWidth;
+ }
+
+ if (VTdInfo->HostAddressWidth != AcpiDmarTable->HostAddressWidth) {
+ DEBUG ((DEBUG_ERROR, "Host Address Width is not match.\n"));
+ ASSERT (FALSE);
+ return EFI_UNSUPPORTED;
+ }
+
+ //
+ // Parse the DMAR ACPI Table to the new Vtd Unit Info Table
+ //
+ VtdUnitNumber = ParseDmarAcpiTableDrhd (AcpiDmarTable, ProcessDrhdPostMemory, VtdUnitInfo);
+ if (VtdUnitNumber == 0) {
+ return EFI_UNSUPPORTED;
+ }
+
+ //
+ // Engine count = number of leading non-empty slots in the unit table.
+ //
+ for (VTdInfo->VTdEngineCount = 0; VTdInfo->VTdEngineCount < VTD_UNIT_MAX; VTdInfo->VTdEngineCount++) {
+ if (VtdUnitInfo[VTdInfo->VTdEngineCount].VtdUnitBaseAddress == 0) {
+ break;
+ }
+ }
+
+ VTdInfo->AcpiDmarTable = AcpiDmarTable;
+
+ return EFI_SUCCESS;
+}
+
+/**
+  Initializes the Intel VTd DMAR for block all DMA.
+
+  @retval EFI_SUCCESS       All VTd engines are set to block DMA.
+  @retval RETURN_NOT_READY  Fail to get the DMAR ACPI table.
+**/
+EFI_STATUS
+InitVTdDmarBlockAll (
+  VOID
+  )
+{
+  EFI_ACPI_DMAR_HEADER  *AcpiDmarTable;
+
+  //
+  // Get the DMAR table
+  //
+  AcpiDmarTable = GetAcpiDmarTable ();
+  ASSERT (AcpiDmarTable != NULL);
+  if (AcpiDmarTable == NULL) {
+    //
+    // Guard the release build: the ASSERT above is compiled out.
+    //
+    return RETURN_NOT_READY;
+  }
+
+  //
+  // Parse the DMAR table and block all DMA.
+  //
+  // Do NOT return the value of ParseDmarAcpiTableDrhd() directly: it is the
+  // UINTN engine count, and any non-zero count would read as an error when
+  // implicitly converted to EFI_STATUS.
+  //
+  ParseDmarAcpiTableDrhd (AcpiDmarTable, ProcessDhrdPreMemory, NULL);
+
+  return EFI_SUCCESS;
+}
+
+/**
+ Initializes DMA buffer
+
+ @retval EFI_SUCCESS DMA buffer is successfully initialized.
+ @retval EFI_INVALID_PARAMETER Invalid DMA buffer size.
+ @retval EFI_OUT_OF_RESOURCES Can't initialize DMA buffer.
+**/
+EFI_STATUS
+InitDmaBuffer(
+ VOID
+ )
+{
+ DMA_BUFFER_INFO *DmaBufferInfo;
+ VOID *Hob;
+ VOID *VtdPmrHobPtr;
+ VTD_PMR_INFO_HOB *VtdPmrHob;
+
+ DEBUG ((DEBUG_INFO, "InitDmaBuffer :\n"));
+
+ Hob = GetFirstGuidHob (&mDmaBufferInfoGuid);
+ ASSERT(Hob != NULL);
+ DmaBufferInfo = GET_GUID_HOB_DATA (Hob);
+
+ /**
+ When gVtdPmrInfoDataHobGuid exists, it means:
+ 1. Dma buffer is reserved by memory initialize code
+ 2. PeiGetVtdPmrAlignmentLib is used to get alignment
+ 3. Protection regions are determined by the system memory map
+ 4. Protection regions will be conveyed through VTD_PMR_INFO_HOB
+
+ When gVtdPmrInfoDataHobGuid doesn't exist, it means:
+ 1. IntelVTdDmarPei driver will calculate the protected memory alignment
+ 2. Dma buffer is reserved by AllocateAlignedPages()
+ **/
+
+
+ if (DmaBufferInfo->DmaBufferSize == 0) {
+ DEBUG ((DEBUG_INFO, " DmaBufferSize is 0\n"));
+ return EFI_INVALID_PARAMETER;
+ }
+
+ if (DmaBufferInfo->DmaBufferBase == 0) {
+ VtdPmrHobPtr = GetFirstGuidHob (&gVtdPmrInfoDataHobGuid);
+ if (VtdPmrHobPtr != NULL) {
+ //
+ // Get the protected memory ranges information from the VTd PMR hob
+ //
+ VtdPmrHob = GET_GUID_HOB_DATA (VtdPmrHobPtr);
+
+ //
+ // The DMA buffer lives in the gap between the low and high protected
+ // regions; make sure that gap is large enough.
+ //
+ if ((VtdPmrHob->ProtectedHighBase - VtdPmrHob->ProtectedLowLimit) < DmaBufferInfo->DmaBufferSize) {
+ DEBUG ((DEBUG_ERROR, " DmaBufferSize not enough\n"));
+ return EFI_INVALID_PARAMETER;
+ }
+ DmaBufferInfo->DmaBufferBase = VtdPmrHob->ProtectedLowLimit;
+ } else {
+ //
+ // Allocate memory for DMA buffer
+ //
+ DmaBufferInfo->DmaBufferBase = (UINTN) AllocateAlignedPages (EFI_SIZE_TO_PAGES (DmaBufferInfo->DmaBufferSize), 0);
+ if (DmaBufferInfo->DmaBufferBase == 0) {
+ DEBUG ((DEBUG_ERROR, " InitDmaBuffer : OutOfResource\n"));
+ return EFI_OUT_OF_RESOURCES;
+ }
+ DEBUG ((DEBUG_INFO, "Alloc DMA buffer success.\n"));
+ }
+
+ //
+ // Arm the bump allocators: Map() grows up from the bottom,
+ // AllocateBuffer() grows down from the top.
+ //
+ DmaBufferInfo->DmaBufferCurrentTop = DmaBufferInfo->DmaBufferBase + DmaBufferInfo->DmaBufferSize;
+ DmaBufferInfo->DmaBufferCurrentBottom = DmaBufferInfo->DmaBufferBase;
+
+ DEBUG ((DEBUG_INFO, " DmaBufferSize : 0x%x\n", DmaBufferInfo->DmaBufferSize));
+ DEBUG ((DEBUG_INFO, " DmaBufferBase : 0x%x\n", DmaBufferInfo->DmaBufferBase));
+ }
+
+ DEBUG ((DEBUG_INFO, " DmaBufferCurrentTop : 0x%x\n", DmaBufferInfo->DmaBufferCurrentTop));
+ DEBUG ((DEBUG_INFO, " DmaBufferCurrentBottom : 0x%x\n", DmaBufferInfo->DmaBufferCurrentBottom));
+
+ VTdLogAddEvent (VTDLOG_PEI_PROTECT_MEMORY_RANGE, DmaBufferInfo->DmaBufferCurrentBottom, DmaBufferInfo->DmaBufferCurrentTop);
+
+ return EFI_SUCCESS;
+}
+
+/**
+ Initializes the Intel VTd DMAR for DMA buffer.
+
+ Prepares the VTd configuration, builds the translation tables, enables
+ DMAR protection and then (re)installs the IOMMU PPI so consumers switch
+ from the block-all PPI to the working one.
+
+ @retval EFI_SUCCESS VTd DMAR for DMA buffer is successfully initialized.
+ @retval EFI_OUT_OF_RESOURCES Can't initialize the translation tables.
+ @retval EFI_DEVICE_ERROR DMAR translation is not enabled.
+**/
+EFI_STATUS
+InitVTdDmarForDma (
+ VOID
+ )
+{
+ VTD_INFO *VTdInfo;
+
+ EFI_STATUS Status;
+ EFI_PEI_PPI_DESCRIPTOR *OldDescriptor;
+ EDKII_IOMMU_PPI *OldIoMmuPpi;
+
+ VTdInfo = GetVTdInfoHob ();
+ ASSERT (VTdInfo != NULL);
+
+ DEBUG ((DEBUG_INFO, "PrepareVtdConfig\n"));
+ Status = PrepareVtdConfig (VTdInfo);
+ if (EFI_ERROR (Status)) {
+ ASSERT_EFI_ERROR (Status);
+ return Status;
+ }
+
+ // create root entry table
+ DEBUG ((DEBUG_INFO, "SetupTranslationTable\n"));
+ Status = SetupTranslationTable (VTdInfo);
+ if (EFI_ERROR (Status)) {
+ ASSERT_EFI_ERROR (Status);
+ return Status;
+ }
+
+ DEBUG ((DEBUG_INFO, "EnableVtdDmar\n"));
+ Status = EnableVTdTranslationProtection(VTdInfo);
+ if (EFI_ERROR (Status)) {
+ return Status;
+ }
+
+ DEBUG ((DEBUG_INFO, "Install gEdkiiIoMmuPpiGuid\n"));
+ // install protocol
+ //
+ // (Re)Install PPI: reinstall if the block-all instance from the
+ // pre-memory phase was already published, otherwise install fresh.
+ //
+ Status = PeiServicesLocatePpi (
+ &gEdkiiIoMmuPpiGuid,
+ 0,
+ &OldDescriptor,
+ (VOID **) &OldIoMmuPpi
+ );
+ if (!EFI_ERROR (Status)) {
+ Status = PeiServicesReInstallPpi (OldDescriptor, &mIoMmuPpiList);
+ } else {
+ Status = PeiServicesInstallPpi (&mIoMmuPpiList);
+ }
+ ASSERT_EFI_ERROR (Status);
+
+ return Status;
+}
+
+/**
+  This function handles S3 resume task at the end of PEI
+
+  @param[in] PeiServices   Pointer to PEI Services Table.
+  @param[in] NotifyDesc    Pointer to the descriptor for the Notification event that
+                           caused this function to execute.
+  @param[in] Ppi           Pointer to the PPI data associated with this function.
+
+  @retval EFI_STATUS       Always return EFI_SUCCESS
+**/
+EFI_STATUS
+EFIAPI
+S3EndOfPeiNotify(
+  IN EFI_PEI_SERVICES          **PeiServices,
+  IN EFI_PEI_NOTIFY_DESCRIPTOR *NotifyDesc,
+  IN VOID                      *Ppi
+  )
+{
+  DEBUG ((DEBUG_INFO, "VTd DMAR PEI S3EndOfPeiNotify\n"));
+
+  //
+  // When BIT1 of PcdVTdPolicyPropertyMask is set, DMAR protection is left
+  // enabled across the end-of-PEI handoff; otherwise tear it down here.
+  //
+  if ((PcdGet8 (PcdVTdPolicyPropertyMask) & BIT1) != 0) {
+    return EFI_SUCCESS;
+  }
+
+  DumpVtdIfError ();
+  DisableVTdTranslationProtection (GetVTdInfoHob ());
+
+  return EFI_SUCCESS;
+}
+
+//
+// End-of-PEI notification used on the S3 resume path only
+// (registered by IntelVTdDmarInitialize when BootMode is S3).
+//
+EFI_PEI_NOTIFY_DESCRIPTOR mS3EndOfPeiNotifyDesc = {
+ (EFI_PEI_PPI_DESCRIPTOR_NOTIFY_CALLBACK | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST),
+ &gEfiEndOfPeiSignalPpiGuid,
+ S3EndOfPeiNotify
+};
+
+/**
+ This function handles VTd engine setup
+
+ Runs whenever gEdkiiVTdInfoPpiGuid is (re)installed: before permanent
+ memory it blocks all DMA; after memory it sets up the DMA buffer and
+ enables translation-based protection.
+
+ @param[in] PeiServices Pointer to PEI Services Table.
+ @param[in] NotifyDesc Pointer to the descriptor for the Notification event that
+ caused this function to execute.
+ @param[in] Ppi Pointer to the PPI data associated with this function.
+
+ @retval EFI_STATUS Always return EFI_SUCCESS
+**/
+EFI_STATUS
+EFIAPI
+VTdInfoNotify (
+ IN EFI_PEI_SERVICES **PeiServices,
+ IN EFI_PEI_NOTIFY_DESCRIPTOR *NotifyDesc,
+ IN VOID *Ppi
+ )
+{
+ EFI_STATUS Status;
+ VOID *MemoryDiscovered;
+ BOOLEAN MemoryInitialized;
+
+ DEBUG ((DEBUG_INFO, "VTdInfoNotify\n"));
+
+ //
+ // Check if memory is initialized.
+ //
+ MemoryInitialized = FALSE;
+ Status = PeiServicesLocatePpi (
+ &gEfiPeiMemoryDiscoveredPpiGuid,
+ 0,
+ NULL,
+ &MemoryDiscovered
+ );
+ if (!EFI_ERROR(Status)) {
+ MemoryInitialized = TRUE;
+ }
+
+ DEBUG ((DEBUG_INFO, "MemoryInitialized - %x\n", MemoryInitialized));
+
+ if (!MemoryInitialized) {
+ //
+ // If the memory is not initialized,
+ // Protect all system memory
+ //
+
+ InitVTdDmarBlockAll ();
+
+ //
+ // Install PPI.
+ //
+ Status = PeiServicesInstallPpi (&mIoMmuPpiList);
+ ASSERT_EFI_ERROR(Status);
+ } else {
+ //
+ // If the memory is initialized,
+ // Allocate DMA buffer and protect rest system memory
+ //
+
+ VTdLogInitialize (TRUE);
+
+ Status = InitDmaBuffer ();
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // NOTE: We need reinit VTdInfo because previous information might be overridden.
+ //
+ Status = InitVTdInfo ();
+ ASSERT_EFI_ERROR (Status);
+
+ Status = InitVTdDmarForDma ();
+ ASSERT_EFI_ERROR (Status);
+ }
+
+ return EFI_SUCCESS;
+}
+
+//
+// Notification on gEdkiiVTdInfoPpiGuid: fires each time the platform
+// publishes (or re-publishes) the DMAR information.
+//
+EFI_PEI_NOTIFY_DESCRIPTOR mVTdInfoNotifyDesc = {
+ (EFI_PEI_PPI_DESCRIPTOR_NOTIFY_CALLBACK | EFI_PEI_PPI_DESCRIPTOR_TERMINATE_LIST),
+ &gEdkiiVTdInfoPpiGuid,
+ VTdInfoNotify
+};
+
+/**
+ Initializes the Intel VTd DMAR PEIM.
+
+ @param[in] FileHandle Handle of the file being invoked.
+ @param[in] PeiServices Describes the list of possible PEI Services.
+
+ @retval EFI_SUCCESS The Intel VTd DMAR PEIM is successfully initialized.
+ @retval EFI_UNSUPPORTED Pre-boot DMA protection is disabled by policy.
+ @retval EFI_OUT_OF_RESOURCES Can't initialize the driver.
+**/
+EFI_STATUS
+EFIAPI
+IntelVTdDmarInitialize (
+ IN EFI_PEI_FILE_HANDLE FileHandle,
+ IN CONST EFI_PEI_SERVICES **PeiServices
+ )
+{
+ EFI_STATUS Status;
+ EFI_BOOT_MODE BootMode;
+ DMA_BUFFER_INFO *DmaBufferInfo;
+
+ DEBUG ((DEBUG_INFO, "IntelVTdDmarInitialize\n"));
+
+ //
+ // BIT0 of PcdVTdPolicyPropertyMask gates the whole PEI DMA protection
+ // feature; bail out early when the platform disables it.
+ //
+ if ((PcdGet8(PcdVTdPolicyPropertyMask) & BIT0) == 0) {
+ return EFI_UNSUPPORTED;
+ }
+
+ VTdLogInitialize (FALSE);
+
+ DmaBufferInfo = BuildGuidHob (&mDmaBufferInfoGuid, sizeof (DMA_BUFFER_INFO));
+ ASSERT(DmaBufferInfo != NULL);
+ if (DmaBufferInfo == NULL) {
+ return EFI_OUT_OF_RESOURCES;
+ }
+ ZeroMem (DmaBufferInfo, sizeof (DMA_BUFFER_INFO));
+
+ PeiServicesGetBootMode (&BootMode);
+
+ //
+ // S3 resume uses a (typically smaller) dedicated DMA buffer size.
+ //
+ if (BootMode == BOOT_ON_S3_RESUME) {
+ DmaBufferInfo->DmaBufferSize = PcdGet32 (PcdVTdPeiDmaBufferSizeS3);
+ } else {
+ DmaBufferInfo->DmaBufferSize = PcdGet32 (PcdVTdPeiDmaBufferSize);
+ }
+
+ Status = PeiServicesNotifyPpi (&mVTdInfoNotifyDesc);
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Register EndOfPei Notify for S3
+ //
+ if (BootMode == BOOT_ON_S3_RESUME) {
+ Status = PeiServicesNotifyPpi (&mS3EndOfPeiNotifyDesc);
+ ASSERT_EFI_ERROR (Status);
+ }
+
+ return EFI_SUCCESS;
+}
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.h b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.h
new file mode 100644
index 000000000..cca9c7f02
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.h
@@ -0,0 +1,262 @@
+/** @file
+ Definitions for the Intel VTd core PEIM (IntelVTdCorePei) pre-boot DMA protection driver.
+
+ Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef __DMA_ACCESS_LIB_H__
+#define __DMA_ACCESS_LIB_H__
+
+#include <Library/IntelVTdPeiDxeLib.h>
+
+#define VTD_64BITS_ADDRESS(Lo, Hi) (LShiftU64 (Lo, 12) | LShiftU64 (Hi, 32))
+
+//
+// Use 256-bit descriptor
+// Queue size is 128.
+//
+#define VTD_QUEUED_INVALIDATION_DESCRIPTOR_WIDTH 1
+#define VTD_INVALIDATION_QUEUE_SIZE 0
+
+//
+// Per-engine context for one VTd (DMAR) hardware unit.
+//
+typedef struct {
+ BOOLEAN Done; // TRUE once DMA protection has been enabled on this unit
+ UINTN VtdUnitBaseAddress; // MMIO base of the unit's register set
+ UINT16 Segment;
+ UINT8 Flags;
+ VTD_VER_REG VerReg; // cached VER_REG value
+ VTD_CAP_REG CapReg; // cached CAP_REG value
+ VTD_ECAP_REG ECapReg; // cached ECAP_REG value
+ BOOLEAN Is5LevelPaging; // use 5-level second-level page table
+ UINT8 EnableQueuedInvalidation; // non-zero: queued invalidation interface in use
+ VOID *QiDescBuffer; // invalidation queue buffer (page-allocated)
+ UINTN QiDescBufferSize; // size in bytes of QiDescBuffer
+ UINTN FixedSecondLevelPagingEntry;
+ UINTN RootEntryTable;
+ UINTN ExtRootEntryTable;
+ UINTN RootEntryTablePageSize;
+ UINTN ExtRootEntryTablePageSize;
+} VTD_UNIT_INFO;
+
+//
+// Overall VTd context: parsed DMAR table plus one VTD_UNIT_INFO per engine.
+//
+typedef struct {
+ EFI_ACPI_DMAR_HEADER *AcpiDmarTable;
+ UINT8 HostAddressWidth;
+ VTD_REGESTER_THIN_INFO *RegsInfoBuffer; // scratch buffer for error register dumps
+ UINTN VTdEngineCount;
+ VTD_UNIT_INFO *VtdUnitInfo; // array of VTdEngineCount entries
+} VTD_INFO;
+
+//
+// PEI DMA buffer bookkeeping.
+// NOTE(review): CurrentTop/CurrentBottom suggest allocation from both ends of
+// the buffer — confirm against the allocator in TranslationTable.c.
+//
+typedef struct {
+ UINTN DmaBufferBase;
+ UINTN DmaBufferSize;
+ UINTN DmaBufferCurrentTop;
+ UINTN DmaBufferCurrentBottom;
+} DMA_BUFFER_INFO;
+
+/**
+ Callback invoked for each DRHD structure found in the DMAR ACPI table.
+
+ @param[in, out] Context Caller-supplied context.
+ @param[in] VTdIndex Index of the VTd engine.
+ @param[in] DmarDrhd The DRHD structure.
+**/
+typedef
+VOID
+(*PROCESS_DRHD_CALLBACK_FUNC) (
+ IN OUT VOID *Context,
+ IN UINTN VTdIndex,
+ IN EFI_ACPI_DMAR_DRHD_HEADER *DmarDrhd
+ );
+
+/**
+ Enable VTd translation table protection for block DMA
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+
+ @retval EFI_SUCCESS DMAR translation is enabled.
+ @retval EFI_DEVICE_ERROR DMAR translation is not enabled.
+**/
+EFI_STATUS
+EnableVTdTranslationProtectionBlockDma (
+ IN UINTN VtdUnitBaseAddress
+ );
+
+/**
+ Enable VTd translation table protection.
+
+ @param[in] VTdInfo The VTd engine context information.
+
+ @retval EFI_SUCCESS DMAR translation is enabled.
+ @retval EFI_DEVICE_ERROR DMAR translation is not enabled.
+**/
+EFI_STATUS
+EnableVTdTranslationProtection (
+ IN VTD_INFO *VTdInfo
+ );
+
+/**
+ Disable VTd translation table protection.
+
+ @param[in] VTdInfo The VTd engine context information.
+**/
+VOID
+DisableVTdTranslationProtection (
+ IN VTD_INFO *VTdInfo
+ );
+
+/**
+ Parse DMAR DRHD table.
+
+ @param[in] AcpiDmarTable DMAR ACPI table
+ @param[in] Callback Callback function for handle DRHD
+ @param[in] Context Callback function Context
+
+ @return the VTd engine number.
+
+**/
+UINTN
+ParseDmarAcpiTableDrhd (
+ IN EFI_ACPI_DMAR_HEADER *AcpiDmarTable,
+ IN PROCESS_DRHD_CALLBACK_FUNC Callback,
+ IN VOID *Context
+ );
+
+/**
+ Prepare VTD configuration.
+
+ @param[in] VTdInfo The VTd engine context information.
+
+ @retval EFI_SUCCESS Prepare Vtd config success
+**/
+EFI_STATUS
+PrepareVtdConfig (
+ IN VTD_INFO *VTdInfo
+ );
+
+/**
+ Setup VTd translation table.
+
+ @param[in] VTdInfo The VTd engine context information.
+
+ @retval EFI_SUCCESS Setup translation table successfully.
+ @retval EFI_OUT_OF_RESOURCE Setup translation table fail.
+**/
+EFI_STATUS
+SetupTranslationTable (
+ IN VTD_INFO *VTdInfo
+ );
+
+/**
+ Flush VTD page table and context table memory.
+
+ This action is to make sure the IOMMU engine can get final data in memory.
+
+ @param[in] VtdUnitInfo The VTd engine unit information.
+ @param[in] Base The base address of memory to be flushed.
+ @param[in] Size The size of memory in bytes to be flushed.
+**/
+VOID
+FlushPageTableMemory (
+ IN VTD_UNIT_INFO *VtdUnitInfo,
+ IN UINTN Base,
+ IN UINTN Size
+ );
+
+/**
+ Allocate zero pages.
+
+ @param[in] Pages the number of pages.
+
+ @return the page address.
+ @retval NULL No resource to allocate pages.
+**/
+VOID *
+EFIAPI
+AllocateZeroPages (
+ IN UINTN Pages
+ );
+
+/**
+ Return the index of PCI data.
+
+ @param[in] VtdUnitInfo The VTd engine unit information.
+ @param[in] Segment The Segment used to identify a VTd engine.
+ @param[in] SourceId The SourceId used to identify a VTd engine and table entry.
+
+ @return The index of the PCI data.
+ @retval (UINTN)-1 The PCI data is not found.
+**/
+UINTN
+GetPciDataIndex (
+ IN VTD_UNIT_INFO *VtdUnitInfo,
+ IN UINT16 Segment,
+ IN VTD_SOURCE_ID SourceId
+ );
+
+/**
+ Get the VTd engine context information hob.
+
+ @retval The VTd engine context information.
+
+**/
+VTD_INFO *
+GetVTdInfoHob (
+ VOID
+ );
+
+/**
+ Dump VTd registers if there is error.
+**/
+VOID
+DumpVtdIfError (
+ VOID
+ );
+
+/**
+ Add the VTd log event in post memory phase.
+
+ @param[in] EventType Event type
+ @param[in] Data1 First parameter
+ @param[in] Data2 Second parameter
+
+**/
+VOID
+EFIAPI
+VTdLogAddEvent (
+ IN CONST VTDLOG_EVENT_TYPE EventType,
+ IN CONST UINT64 Data1,
+ IN CONST UINT64 Data2
+ );
+
+/**
+ Add a new VTd log event with data.
+
+ @param[in] EventType Event type
+ @param[in] Param parameter
+ @param[in] Data Data
+ @param[in] DataSize Data size
+
+**/
+VOID
+EFIAPI
+VTdLogAddDataEvent (
+ IN CONST VTDLOG_EVENT_TYPE EventType,
+ IN CONST UINT64 Param,
+ IN CONST VOID *Data,
+ IN CONST UINT32 DataSize
+ );
+
+/**
+ Add the VTd log event in pre-memory phase.
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+ @param[in] Mode Pre-memory DMA protection mode.
+ @param[in] Status Status
+
+**/
+VOID
+EFIAPI
+VTdLogAddPreMemoryEvent (
+ IN UINTN VtdUnitBaseAddress,
+ IN UINT8 Mode,
+ IN UINT8 Status
+ );
+
+extern EFI_GUID mVTdInfoGuid;
+extern EFI_GUID mDmaBufferInfoGuid;
+
+#endif
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.inf b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.inf
new file mode 100644
index 000000000..f756c543c
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.inf
@@ -0,0 +1,70 @@
+## @file
+# Component INF file for the Intel VTd DMAR PEIM.
+#
+# This driver initializes VTd engine based upon EDKII_VTD_INFO_PPI
+# and provide DMA protection in PEI.
+#
+# Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+##
+
+[Defines]
+ INF_VERSION = 0x00010017
+ BASE_NAME = IntelVTdCorePei
+ MODULE_UNI_FILE = IntelVTdCorePei.uni
+ FILE_GUID = 9311b0cc-5c08-4c0a-bec8-23afab024e48
+ MODULE_TYPE = PEIM
+ VERSION_STRING = 2.0
+ ENTRY_POINT = IntelVTdDmarInitialize
+
+[Packages]
+ MdePkg/MdePkg.dec
+ MdeModulePkg/MdeModulePkg.dec
+ IntelSiliconPkg/IntelSiliconPkg.dec
+
+[Sources]
+ IntelVTdCorePei.c
+ IntelVTdCorePei.h
+ IntelVTdDmar.c
+ DmarTable.c
+ TranslationTable.c
+
+[LibraryClasses]
+ DebugLib
+ BaseMemoryLib
+ BaseLib
+ PeimEntryPoint
+ PeiServicesLib
+ HobLib
+ IoLib
+ CacheMaintenanceLib
+ PciSegmentLib
+ IntelVTdPeiDxeLib
+
+[Guids]
+ gVTdLogBufferHobGuid ## PRODUCES CONSUMES
+ gVtdPmrInfoDataHobGuid ## CONSUMES
+
+[Ppis]
+ gEdkiiIoMmuPpiGuid ## PRODUCES
+ gEdkiiVTdInfoPpiGuid ## CONSUMES
+ gEfiPeiMemoryDiscoveredPpiGuid ## CONSUMES
+ gEfiEndOfPeiSignalPpiGuid ## CONSUMES
+ gEdkiiVTdNullRootEntryTableGuid ## CONSUMES
+
+[Pcd]
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdPolicyPropertyMask ## CONSUMES
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdPeiDmaBufferSize ## CONSUMES
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdPeiDmaBufferSizeS3 ## CONSUMES
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdSupportAbortDmaMode ## CONSUMES
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdLogLevel ## CONSUMES
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdPeiPostMemLogBufferSize ## CONSUMES
+
+[Depex]
+ gEfiPeiMasterBootModePpiGuid AND
+ gEdkiiVTdInfoPpiGuid
+
+[UserExtensions.TianoCore."ExtraFiles"]
+ IntelVTdCorePeiExtra.uni
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.uni b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.uni
new file mode 100644
index 000000000..2b5b260f5
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePei.uni
@@ -0,0 +1,14 @@
+// /** @file
+// IntelVTdCorePei Module Localized Abstract and Description Content
+//
+// Copyright (c) 2020, Intel Corporation. All rights reserved.<BR>
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+//
+// **/
+
+
+#string STR_MODULE_ABSTRACT #language en-US "Intel VTd CORE PEI Driver."
+
+#string STR_MODULE_DESCRIPTION #language en-US "This driver initializes VTd engine based upon EDKII_VTD_INFO_PPI and provide DMA protection to device in PEI."
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePeiExtra.uni b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePeiExtra.uni
new file mode 100644
index 000000000..14848f924
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdCorePeiExtra.uni
@@ -0,0 +1,14 @@
+// /** @file
+// IntelVTdCorePei Localized Strings and Content
+//
+// Copyright (c) 2020, Intel Corporation. All rights reserved.<BR>
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+//
+// **/
+
+#string STR_PROPERTIES_MODULE_NAME
+#language en-US
+"Intel VTd CORE PEI Driver"
+
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdDmar.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdDmar.c
new file mode 100644
index 000000000..93207ba52
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/IntelVTdDmar.c
@@ -0,0 +1,727 @@
+/** @file
+
+ Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <PiPei.h>
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/IoLib.h>
+#include <Library/DebugLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/CacheMaintenanceLib.h>
+#include <Library/PeiServicesLib.h>
+#include <IndustryStandard/Vtd.h>
+#include <Ppi/VtdInfo.h>
+#include <Ppi/VtdNullRootEntryTable.h>
+#include <Ppi/IoMmu.h>
+#include <Guid/VtdLogDataHob.h>
+#include "IntelVTdCorePei.h"
+
+#define VTD_CAP_REG_NFR_MAX (256)
+
+/**
+ Flush a range of VTd page table / context table memory to RAM.
+
+ Guarantees the IOMMU engine observes the final table contents when the
+ engine is not page-walk coherent (ECAP.C == 0); coherent engines need no
+ explicit cache write-back.
+
+ @param[in] VtdUnitInfo The VTd engine unit information.
+ @param[in] Base The base address of memory to be flushed.
+ @param[in] Size The size of memory in bytes to be flushed.
+**/
+VOID
+FlushPageTableMemory (
+ IN VTD_UNIT_INFO *VtdUnitInfo,
+ IN UINTN Base,
+ IN UINTN Size
+ )
+{
+ if (VtdUnitInfo->ECapReg.Bits.C != 0) {
+ //
+ // Page-walk coherent engine: nothing to do.
+ //
+ return;
+ }
+
+ WriteBackDataCacheRange ((VOID *) Base, Size);
+}
+
+/**
+ Prepare the cache invalidation interface.
+
+ (The function name keeps the original "Perpare" spelling because external
+ callers reference it.)
+
+ Engines reporting architecture version major <= 5 use register-based
+ invalidation. Newer engines must support queued invalidation (QI); for
+ those, this routine allocates the invalidation queue buffer (if not already
+ allocated), programs IQT/IQA, and enables QI via the Global Command
+ Register.
+
+ @param[in] VtdUnitInfo The VTd engine unit information.
+
+ @retval EFI_SUCCESS The operation was successful.
+ @retval EFI_UNSUPPORTED Invalidation method is not supported.
+ @retval EFI_OUT_OF_RESOURCES A memory allocation failed.
+**/
+EFI_STATUS
+PerpareCacheInvalidationInterface (
+ IN VTD_UNIT_INFO *VtdUnitInfo
+ )
+{
+ UINT32 Reg32;
+ VTD_ECAP_REG ECapReg;
+ VTD_IQA_REG IqaReg;
+ UINTN VtdUnitBaseAddress;
+
+ VtdUnitBaseAddress = VtdUnitInfo->VtdUnitBaseAddress;
+
+ if (VtdUnitInfo->VerReg.Bits.Major <= 5) {
+ VtdUnitInfo->EnableQueuedInvalidation = 0;
+ DEBUG ((DEBUG_INFO, "Use Register-based Invalidation Interface for engine [0x%x]\n", VtdUnitBaseAddress));
+ return EFI_SUCCESS;
+ }
+
+ ECapReg.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_ECAP_REG);
+ if (ECapReg.Bits.QI == 0) {
+ DEBUG ((DEBUG_ERROR, "Hardware does not support queued invalidations interface for engine [0x%x]\n", VtdUnitBaseAddress));
+ return EFI_UNSUPPORTED;
+ }
+
+ VtdUnitInfo->EnableQueuedInvalidation = 1;
+ DEBUG ((DEBUG_INFO, "Use Queued Invalidation Interface for engine [0x%x]\n", VtdUnitBaseAddress));
+
+ //
+ // If QI is already active (e.g. left enabled by earlier firmware), disable
+ // it first so the queue can be reprogrammed safely.
+ //
+ Reg32 = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+ if ((Reg32 & B_GSTS_REG_QIES) != 0) {
+ DEBUG ((DEBUG_INFO,"Queued Invalidation Interface was enabled.\n"));
+
+ VtdLibDisableQueuedInvalidationInterface (VtdUnitBaseAddress);
+ }
+
+ //
+ // Initialize the Invalidation Queue Tail Register to zero.
+ //
+ MmioWrite64 (VtdUnitBaseAddress + R_IQT_REG, 0);
+
+ //
+ // Setup the IQ address, size and descriptor width through the Invalidation Queue Address Register
+ //
+ if (VtdUnitInfo->QiDescBuffer == NULL) {
+ VtdUnitInfo->QiDescBufferSize = (sizeof (QI_256_DESC) * ((UINTN) 1 << (VTD_INVALIDATION_QUEUE_SIZE + 7)));
+ VtdUnitInfo->QiDescBuffer = AllocatePages (EFI_SIZE_TO_PAGES (VtdUnitInfo->QiDescBufferSize));
+ if (VtdUnitInfo->QiDescBuffer == NULL) {
+ DEBUG ((DEBUG_ERROR,"Could not Alloc Invalidation Queue Buffer.\n"));
+ VTdLogAddEvent (VTDLOG_PEI_QUEUED_INVALIDATION, VTD_LOG_QI_ERROR_OUT_OF_RESOURCES, VtdUnitBaseAddress);
+ return EFI_OUT_OF_RESOURCES;
+ }
+ }
+
+ DEBUG ((DEBUG_INFO, "Invalidation Queue Buffer Size : %d\n", VtdUnitInfo->QiDescBufferSize));
+ //
+ // 4KB Aligned address (AllocatePages returns page-aligned memory)
+ //
+ IqaReg.Uint64 = (UINT64) (UINTN) VtdUnitInfo->QiDescBuffer;
+ IqaReg.Bits.DW = VTD_QUEUED_INVALIDATION_DESCRIPTOR_WIDTH;
+ IqaReg.Bits.QS = VTD_INVALIDATION_QUEUE_SIZE;
+ MmioWrite64 (VtdUnitBaseAddress + R_IQA_REG, IqaReg.Uint64);
+ IqaReg.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_IQA_REG);
+ DEBUG ((DEBUG_INFO, "IQA_REG = 0x%lx, IQH_REG = 0x%lx\n", IqaReg.Uint64, MmioRead64 (VtdUnitBaseAddress + R_IQH_REG)));
+
+ //
+ // Enable the queued invalidation interface through the Global Command Register.
+ // When enabled, hardware sets the QIES field in the Global Status Register.
+ //
+ Reg32 = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+ Reg32 |= B_GMCD_REG_QIE;
+ MmioWrite32 (VtdUnitBaseAddress + R_GCMD_REG, Reg32);
+ DEBUG ((DEBUG_INFO, "Enable Queued Invalidation Interface. GCMD_REG = 0x%x\n", Reg32));
+ //
+ // NOTE(review): no timeout on the QIES poll below — hardware is expected to
+ // set the bit promptly; a hang here indicates a broken engine.
+ //
+ do {
+ Reg32 = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+ } while ((Reg32 & B_GSTS_REG_QIES) == 0);
+
+ VTdLogAddEvent (VTDLOG_PEI_QUEUED_INVALIDATION, VTD_LOG_QI_ENABLE, VtdUnitBaseAddress);
+
+ return EFI_SUCCESS;
+}
+
+/**
+ Submit the queued invalidation descriptor to the remapping
+ hardware unit and wait for its completion.
+
+ On EFI_DEVICE_ERROR the fault status (FSTS) and invalidation queue error
+ record (IQERCD) registers are captured and logged, then the IQE/ITE/ICE
+ fault bits are cleared (write-one-to-clear).
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+ @param[in] Desc The invalidate descriptor
+
+ @retval EFI_SUCCESS The operation was successful.
+ @retval RETURN_DEVICE_ERROR A fault is detected.
+ @retval EFI_INVALID_PARAMETER Parameter is invalid.
+**/
+EFI_STATUS
+SubmitQueuedInvalidationDescriptor (
+ IN UINTN VtdUnitBaseAddress,
+ IN QI_256_DESC *Desc
+ )
+{
+ EFI_STATUS Status;
+ VTD_REGESTER_QI_INFO RegisterQi;
+
+ Status = VtdLibSubmitQueuedInvalidationDescriptor (VtdUnitBaseAddress, Desc, FALSE);
+ if (Status == EFI_DEVICE_ERROR) {
+ //
+ // Capture the fault registers for the log before clearing them.
+ //
+ RegisterQi.BaseAddress = VtdUnitBaseAddress;
+ RegisterQi.FstsReg = MmioRead32 (VtdUnitBaseAddress + R_FSTS_REG);
+ RegisterQi.IqercdReg = MmioRead64 (VtdUnitBaseAddress + R_IQERCD_REG);
+ VTdLogAddDataEvent (VTDLOG_PEI_REGISTER, VTDLOG_REGISTER_QI, &RegisterQi, sizeof (VTD_REGESTER_QI_INFO));
+
+ MmioWrite32 (VtdUnitBaseAddress + R_FSTS_REG, RegisterQi.FstsReg & (B_FSTS_REG_IQE | B_FSTS_REG_ITE | B_FSTS_REG_ICE));
+ }
+
+ return Status;
+}
+
+/**
+ Invalidate VTd context cache.
+
+ Uses a global register-based invalidation through CCMD_REG when queued
+ invalidation is disabled; otherwise submits a global context-cache
+ invalidation descriptor to the invalidation queue.
+
+ @param[in] VtdUnitInfo The VTd engine unit information.
+
+ @retval EFI_SUCCESS The context cache was invalidated.
+ @retval EFI_DEVICE_ERROR A previous invalidation is still in progress, or the queued descriptor faulted.
+**/
+EFI_STATUS
+InvalidateContextCache (
+ IN VTD_UNIT_INFO *VtdUnitInfo
+ )
+{
+ UINT64 Reg64;
+ QI_256_DESC QiDesc;
+
+ if (VtdUnitInfo->EnableQueuedInvalidation == 0) {
+ //
+ // Register-based Invalidation
+ //
+ Reg64 = MmioRead64 (VtdUnitInfo->VtdUnitBaseAddress + R_CCMD_REG);
+ if ((Reg64 & B_CCMD_REG_ICC) != 0) {
+ DEBUG ((DEBUG_ERROR,"ERROR: InvalidateContextCache: B_CCMD_REG_ICC is set for VTD(%x)\n", VtdUnitInfo->VtdUnitBaseAddress));
+ return EFI_DEVICE_ERROR;
+ }
+
+ Reg64 &= ((~B_CCMD_REG_ICC) & (~B_CCMD_REG_CIRG_MASK));
+ Reg64 |= (B_CCMD_REG_ICC | V_CCMD_REG_CIRG_GLOBAL);
+ MmioWrite64 (VtdUnitInfo->VtdUnitBaseAddress + R_CCMD_REG, Reg64);
+
+ //
+ // Hardware clears ICC when the invalidation completes (no timeout here).
+ //
+ do {
+ Reg64 = MmioRead64 (VtdUnitInfo->VtdUnitBaseAddress + R_CCMD_REG);
+ } while ((Reg64 & B_CCMD_REG_ICC) != 0);
+ } else {
+ //
+ // Queued Invalidation
+ //
+ QiDesc.Uint64[0] = QI_CC_FM(0) | QI_CC_SID(0) | QI_CC_DID(0) | QI_CC_GRAN(1) | QI_CC_TYPE;
+ QiDesc.Uint64[1] = 0;
+ QiDesc.Uint64[2] = 0;
+ QiDesc.Uint64[3] = 0;
+
+ return SubmitQueuedInvalidationDescriptor(VtdUnitInfo->VtdUnitBaseAddress, &QiDesc);
+ }
+
+ return EFI_SUCCESS;
+}
+
+/**
+ Invalidate VTd IOTLB.
+
+ Uses a global register-based invalidation through the IOTLB register (its
+ offset is derived from ECAP.IRO) when queued invalidation is disabled;
+ otherwise submits a global IOTLB invalidation descriptor, honoring the
+ engine's read/write draining capabilities (CAP.DRD / CAP.DWD).
+
+ @param[in] VtdUnitInfo The VTd engine unit information.
+
+ @retval EFI_SUCCESS The IOTLB was invalidated.
+ @retval EFI_DEVICE_ERROR A previous invalidation is still in progress, or the queued descriptor faulted.
+**/
+EFI_STATUS
+InvalidateIOTLB (
+ IN VTD_UNIT_INFO *VtdUnitInfo
+ )
+{
+ UINT64 Reg64;
+ VTD_ECAP_REG ECapReg;
+ VTD_CAP_REG CapReg;
+ QI_256_DESC QiDesc;
+
+ if (VtdUnitInfo->EnableQueuedInvalidation == 0) {
+ //
+ // Register-based Invalidation
+ //
+ ECapReg.Uint64 = MmioRead64 (VtdUnitInfo->VtdUnitBaseAddress + R_ECAP_REG);
+
+ Reg64 = MmioRead64 (VtdUnitInfo->VtdUnitBaseAddress + (ECapReg.Bits.IRO * 16) + R_IOTLB_REG);
+ if ((Reg64 & B_IOTLB_REG_IVT) != 0) {
+ DEBUG ((DEBUG_ERROR, "ERROR: InvalidateIOTLB: B_IOTLB_REG_IVT is set for VTD(%x)\n", VtdUnitInfo->VtdUnitBaseAddress));
+ return EFI_DEVICE_ERROR;
+ }
+
+ Reg64 &= ((~B_IOTLB_REG_IVT) & (~B_IOTLB_REG_IIRG_MASK));
+ Reg64 |= (B_IOTLB_REG_IVT | V_IOTLB_REG_IIRG_GLOBAL);
+ MmioWrite64 (VtdUnitInfo->VtdUnitBaseAddress + (ECapReg.Bits.IRO * 16) + R_IOTLB_REG, Reg64);
+
+ //
+ // Hardware clears IVT when the invalidation completes (no timeout here).
+ //
+ do {
+ Reg64 = MmioRead64 (VtdUnitInfo->VtdUnitBaseAddress + (ECapReg.Bits.IRO * 16) + R_IOTLB_REG);
+ } while ((Reg64 & B_IOTLB_REG_IVT) != 0);
+ } else {
+ //
+ // Queued Invalidation
+ //
+ CapReg.Uint64 = MmioRead64 (VtdUnitInfo->VtdUnitBaseAddress + R_CAP_REG);
+ QiDesc.Uint64[0] = QI_IOTLB_DID(0) | (CapReg.Bits.DRD ? QI_IOTLB_DR(1) : QI_IOTLB_DR(0)) | (CapReg.Bits.DWD ? QI_IOTLB_DW(1) : QI_IOTLB_DW(0)) | QI_IOTLB_GRAN(1) | QI_IOTLB_TYPE;
+ QiDesc.Uint64[1] = QI_IOTLB_ADDR(0) | QI_IOTLB_IH(0) | QI_IOTLB_AM(0);
+ QiDesc.Uint64[2] = 0;
+ QiDesc.Uint64[3] = 0;
+
+ return SubmitQueuedInvalidationDescriptor(VtdUnitInfo->VtdUnitBaseAddress, &QiDesc);
+ }
+
+ return EFI_SUCCESS;
+}
+
+/**
+ Enable DMAR translation in pre-mem phase.
+
+ Programs RTADDR_REG with the caller-supplied value (either the abort-DMA
+ mode TTM encoding or a null root entry table address), latches it with
+ SRTP, flushes the write buffer, and finally sets TE to turn translation on.
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+ @param[in] RtaddrRegValue The value of RTADDR_REG.
+
+ @retval EFI_SUCCESS DMAR translation is enabled.
+ @retval EFI_DEVICE_ERROR DMAR translation is not enabled.
+**/
+EFI_STATUS
+EnableDmarPreMem (
+ IN UINTN VtdUnitBaseAddress,
+ IN UINT64 RtaddrRegValue
+ )
+{
+ UINT32 Reg32;
+
+ DEBUG ((DEBUG_INFO, ">>>>>>EnableDmarPreMem() for engine [%x] \n", VtdUnitBaseAddress));
+
+ DEBUG ((DEBUG_INFO, "RTADDR_REG : 0x%016lx \n", RtaddrRegValue));
+ MmioWrite64 (VtdUnitBaseAddress + R_RTADDR_REG, RtaddrRegValue);
+
+ //
+ // SRTP latches the new root table pointer; hardware signals completion via RTPS.
+ //
+ DEBUG ((DEBUG_INFO, "EnableDmarPreMem: waiting for RTPS bit to be set... \n"));
+ VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_SRTP);
+
+ Reg32 = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+ DEBUG ((DEBUG_INFO, "EnableDmarPreMem: R_GSTS_REG = 0x%x \n", Reg32));
+
+ //
+ // Write Buffer Flush
+ //
+ VtdLibFlushWriteBuffer (VtdUnitBaseAddress);
+
+ //
+ // Enable VTd
+ //
+ VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_TE);
+
+ DEBUG ((DEBUG_INFO, "VTD () enabled!<<<<<<\n"));
+
+ return EFI_SUCCESS;
+}
+
+/**
+ Enable DMAR translation.
+
+ If translation was already enabled (e.g. left on by earlier firmware) and
+ the engine supports Abort DMA Mode (ECAP.ADMS) with the policy PCD set,
+ the engine is first switched to abort-DMA mode so no DMA passes through
+ while the caches are invalidated, then switched to the real root entry
+ table. Otherwise the root entry table is programmed directly. In all cases
+ the write buffer is flushed and the context cache and IOTLB are globally
+ invalidated before TE is set.
+
+ @param[in] VtdUnitInfo The VTd engine unit information.
+ @param[in] RootEntryTable The address of the VTd RootEntryTable.
+
+ @retval EFI_SUCCESS DMAR translation is enabled.
+ @retval EFI_DEVICE_ERROR DMAR translation is not enabled.
+**/
+EFI_STATUS
+EnableDmar (
+ IN VTD_UNIT_INFO *VtdUnitInfo,
+ IN UINTN RootEntryTable
+ )
+{
+ UINTN VtdUnitBaseAddress;
+ BOOLEAN TEWasEnabled;
+
+ VtdUnitBaseAddress = VtdUnitInfo->VtdUnitBaseAddress;
+
+ DEBUG ((DEBUG_INFO, ">>>>>>EnableDmar() for engine [%x] \n", VtdUnitBaseAddress));
+
+ //
+ // Check TE was enabled or not.
+ //
+ TEWasEnabled = ((MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG) & B_GSTS_REG_TE) == B_GSTS_REG_TE);
+
+ if (TEWasEnabled && (VtdUnitInfo->ECapReg.Bits.ADMS == 1) && PcdGetBool (PcdVTdSupportAbortDmaMode)) {
+ //
+ // For implementations reporting Enhanced SRTP Support (ESRTPS) field as
+ // Clear in the Capability register, software must not modify this field while
+ // DMA remapping is active (TES=1 in Global Status register).
+ //
+ if (VtdUnitInfo->CapReg.Bits.ESRTPS == 0) {
+ VtdLibClearGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_TE);
+ }
+
+ //
+ // Enable ADM
+ //
+ MmioWrite64 (VtdUnitBaseAddress + R_RTADDR_REG, (UINT64) (RootEntryTable | V_RTADDR_REG_TTM_ADM));
+
+ DEBUG ((DEBUG_INFO, "EnableDmar: waiting for RTPS bit to be set... \n"));
+ VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_SRTP);
+
+ DEBUG ((DEBUG_INFO, "Enable Abort DMA Mode...\n"));
+ VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_TE);
+
+ } else {
+ DEBUG ((DEBUG_INFO, "RootEntryTable 0x%x \n", RootEntryTable));
+ MmioWrite64 (VtdUnitBaseAddress + R_RTADDR_REG, (UINT64) RootEntryTable);
+
+ DEBUG ((DEBUG_INFO, "EnableDmar: waiting for RTPS bit to be set... \n"));
+ VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_SRTP);
+ }
+
+ //
+ // Write Buffer Flush before invalidation
+ //
+ VtdLibFlushWriteBuffer (VtdUnitBaseAddress);
+
+ //
+ // Invalidate the context cache
+ //
+ InvalidateContextCache (VtdUnitInfo);
+
+ //
+ // Invalidate the IOTLB cache
+ //
+ InvalidateIOTLB (VtdUnitInfo);
+
+ //
+ // Abort-DMA path: now that caches are clean, switch from abort mode to the
+ // real root entry table.
+ //
+ if (TEWasEnabled && (VtdUnitInfo->ECapReg.Bits.ADMS == 1) && PcdGetBool (PcdVTdSupportAbortDmaMode)) {
+ if (VtdUnitInfo->CapReg.Bits.ESRTPS == 0) {
+ VtdLibClearGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_TE);
+ }
+
+ DEBUG ((DEBUG_INFO, "RootEntryTable 0x%x \n", RootEntryTable));
+ MmioWrite64 (VtdUnitBaseAddress + R_RTADDR_REG, (UINT64) RootEntryTable);
+
+ DEBUG ((DEBUG_INFO, "EnableDmar: waiting for RTPS bit to be set... \n"));
+ VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_SRTP);
+ }
+
+ //
+ // Enable VTd
+ //
+ VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_TE);
+
+ DEBUG ((DEBUG_INFO, "VTD () enabled!<<<<<<\n"));
+
+ return EFI_SUCCESS;
+}
+
+/**
+ Enable VTd translation table protection for block DMA
+
+ Prefers Abort DMA Mode when the engine supports it (ECAP.ADMS) and policy
+ allows; otherwise falls back to enabling translation with the platform's
+ null root entry table so that all DMA is blocked.
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+
+ @retval EFI_SUCCESS DMAR translation is enabled.
+ @retval EFI_DEVICE_ERROR DMAR translation is not enabled.
+**/
+EFI_STATUS
+EnableVTdTranslationProtectionBlockDma (
+ IN UINTN VtdUnitBaseAddress
+ )
+{
+ EFI_STATUS Status;
+ VTD_ECAP_REG ECapReg;
+ EDKII_VTD_NULL_ROOT_ENTRY_TABLE_PPI *RootEntryTable;
+ UINT8 Mode;
+
+ DEBUG ((DEBUG_INFO, "EnableVTdTranslationProtectionBlockDma - 0x%08x\n", VtdUnitBaseAddress));
+
+ DEBUG ((DEBUG_INFO, "PcdVTdSupportAbortDmaMode : %d\n", PcdGetBool (PcdVTdSupportAbortDmaMode)));
+
+ ECapReg.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_ECAP_REG);
+ DEBUG ((DEBUG_INFO, "ECapReg.ADMS : %d\n", ECapReg.Bits.ADMS));
+
+ if ((ECapReg.Bits.ADMS == 1) && PcdGetBool (PcdVTdSupportAbortDmaMode)) {
+ Mode = VTD_LOG_PEI_PRE_MEM_ADM;
+ //
+ // Use Abort DMA Mode
+ //
+ DEBUG ((DEBUG_INFO, "Enable abort DMA mode.\n"));
+ Status = EnableDmarPreMem (VtdUnitBaseAddress, V_RTADDR_REG_TTM_ADM);
+ } else {
+ //
+ // Use Null Root Entry Table
+ //
+ Status = PeiServicesLocatePpi (
+ &gEdkiiVTdNullRootEntryTableGuid,
+ 0,
+ NULL,
+ (VOID **)&RootEntryTable
+ );
+ if (EFI_ERROR (Status)) {
+ Mode = VTD_LOG_PEI_PRE_MEM_DISABLE;
+ DEBUG ((DEBUG_ERROR, "Locate Null Root Entry Table Ppi Failed : %r\n", Status));
+ ASSERT (FALSE);
+ } else {
+ Mode = VTD_LOG_PEI_PRE_MEM_TE;
+ DEBUG ((DEBUG_INFO, "Block All DMA by TE.\n"));
+ Status = EnableDmarPreMem (VtdUnitBaseAddress, (UINT64) (*RootEntryTable));
+ }
+ }
+
+ //
+ // Mode is assigned on every path above; log the outcome (1 = success).
+ //
+ VTdLogAddPreMemoryEvent (VtdUnitBaseAddress, Mode, EFI_ERROR (Status) ? 0 : 1);
+
+ return Status;
+}
+
+/**
+ Enable VTd translation table protection.
+
+ Walks all VTd engines, skipping any already enabled (Done). Uses the
+ extended root entry table when one was built (OR-ing BIT11 into the
+ RTADDR value — presumably the translation table mode selector; confirm
+ against the VT-d specification), otherwise the legacy root entry table.
+
+ @param[in] VTdInfo The VTd engine context information.
+
+ @retval EFI_SUCCESS DMAR translation is enabled.
+ @retval EFI_DEVICE_ERROR DMAR translation is not enabled.
+**/
+EFI_STATUS
+EnableVTdTranslationProtection (
+ IN VTD_INFO *VTdInfo
+ )
+{
+ EFI_STATUS Status;
+ UINTN Index;
+ VTD_UNIT_INFO *VtdUnitInfo;
+
+ for (Index = 0; Index < VTdInfo->VTdEngineCount; Index++) {
+ VtdUnitInfo = &VTdInfo->VtdUnitInfo[Index];
+ if (VtdUnitInfo->Done) {
+ DEBUG ((DEBUG_INFO, "EnableVtdDmar (%d) was enabled\n", Index));
+ continue;
+ }
+
+ if (VtdUnitInfo->ExtRootEntryTable != 0) {
+ DEBUG ((DEBUG_INFO, "EnableVtdDmar (%d) ExtRootEntryTable 0x%x\n", Index, VtdUnitInfo->ExtRootEntryTable));
+ Status = EnableDmar (VtdUnitInfo, VtdUnitInfo->ExtRootEntryTable | BIT11);
+ } else {
+ DEBUG ((DEBUG_INFO, "EnableVtdDmar (%d) RootEntryTable 0x%x\n", Index, VtdUnitInfo->RootEntryTable));
+ Status = EnableDmar (VtdUnitInfo, VtdUnitInfo->RootEntryTable);
+ }
+
+ //
+ // Use the cached unit pointer for consistency with the rest of the loop.
+ //
+ VTdLogAddEvent (VTDLOG_PEI_POST_MEM_ENABLE_DMA_PROTECT, VtdUnitInfo->VtdUnitBaseAddress, Status);
+
+ if (EFI_ERROR (Status)) {
+ DEBUG ((DEBUG_ERROR, "EnableVtdDmar (%d) Failed !\n", Index));
+ return Status;
+ }
+ VtdUnitInfo->Done = TRUE;
+ }
+ return EFI_SUCCESS;
+}
+
+/**
+ Disable VTd translation table protection.
+
+ For every engine: disables DMA remapping, then (if queued invalidation was
+ in use) disables the QI interface and frees the invalidation queue buffer.
+ Safe to call with a NULL context (no-op).
+
+ @param[in] VTdInfo The VTd engine context information.
+**/
+VOID
+DisableVTdTranslationProtection (
+ IN VTD_INFO *VTdInfo
+ )
+{
+ UINTN Index;
+ VTD_UNIT_INFO *VtdUnitInfo;
+
+ if (VTdInfo == NULL) {
+ return;
+ }
+
+ DEBUG ((DEBUG_INFO, "DisableVTdTranslationProtection - %d Vtd Engine\n", VTdInfo->VTdEngineCount));
+
+ for (Index = 0; Index < VTdInfo->VTdEngineCount; Index++) {
+ VtdUnitInfo = &VTdInfo->VtdUnitInfo[Index];
+
+ VtdLibDisableDmar (VtdUnitInfo->VtdUnitBaseAddress);
+ VTdLogAddEvent (VTDLOG_PEI_POST_MEM_DISABLE_DMA_PROTECT, VtdUnitInfo->VtdUnitBaseAddress, 0);
+
+ if (VtdUnitInfo->EnableQueuedInvalidation != 0) {
+ //
+ // Disable queued invalidation interface.
+ //
+ VtdLibDisableQueuedInvalidationInterface (VtdUnitInfo->VtdUnitBaseAddress);
+
+ //
+ // Release the queue buffer and reset the bookkeeping so a later
+ // re-enable starts from a clean state.
+ //
+ if (VtdUnitInfo->QiDescBuffer != NULL) {
+ FreePages(VtdUnitInfo->QiDescBuffer, EFI_SIZE_TO_PAGES (VtdUnitInfo->QiDescBufferSize));
+ VtdUnitInfo->QiDescBuffer = NULL;
+ VtdUnitInfo->QiDescBufferSize = 0;
+ }
+
+ VtdUnitInfo->EnableQueuedInvalidation = 0;
+ VTdLogAddEvent (VTDLOG_PEI_QUEUED_INVALIDATION, VTD_LOG_QI_DISABLE, VtdUnitInfo->VtdUnitBaseAddress);
+ }
+ }
+
+ return;
+}
+
+/**
+ Check if VTd engine use 5 level paging.
+
+ Prefers 4-level paging when the host address width fits in 48 bits and the
+ hardware supports it (SAGAW BIT2); selects 5-level paging (SAGAW BIT3)
+ otherwise.
+
+ @param[in] HostAddressWidth Host Address Width.
+ @param[in] CapReg The engine's capability register (SAGAW field).
+ @param[out] Is5LevelPaging On return, TRUE if 5 level paging will be used.
+
+ @retval EFI_SUCCESS Success
+ @retval EFI_UNSUPPORTED Neither 4-level nor 5-level paging is supported.
+
+**/
+EFI_STATUS
+VtdCheckUsing5LevelPaging (
+ IN UINT8 HostAddressWidth,
+ IN VTD_CAP_REG CapReg,
+ OUT BOOLEAN *Is5LevelPaging
+ )
+{
+ DEBUG ((DEBUG_INFO, " CapReg SAGAW bits : 0x%02x\n", CapReg.Bits.SAGAW));
+
+ *Is5LevelPaging = FALSE;
+ if ((CapReg.Bits.SAGAW & BIT3) != 0) {
+ //
+ // 5-level is supported; fall back to 4-level only when the host address
+ // width fits in 48 bits and the hardware also supports 4-level (BIT2).
+ // (The previous code returned EFI_UNSUPPORTED here whenever the 4-level
+ // fallback did not apply, which made the 5-level path unreachable.)
+ //
+ *Is5LevelPaging = TRUE;
+ if ((HostAddressWidth <= 48) &&
+ ((CapReg.Bits.SAGAW & BIT2) != 0)) {
+ *Is5LevelPaging = FALSE;
+ }
+ }
+ //
+ // At least one of 4-level (BIT2) or 5-level (BIT3) must be supported.
+ //
+ if ((CapReg.Bits.SAGAW & (BIT3 | BIT2)) == 0) {
+ return EFI_UNSUPPORTED;
+ }
+ DEBUG ((DEBUG_INFO, " Using %d Level Paging\n", *Is5LevelPaging ? 5 : 4));
+ return EFI_SUCCESS;
+}
+
+
+/**
+ Prepare VTD configuration.
+
+ For each engine not yet enabled: caches VER/CAP/ECAP, decides 4- vs 5-level
+ paging, and prepares the cache invalidation interface. Also lazily
+ allocates the shared register-dump scratch buffer used by DumpVtdIfError().
+
+ @param[in] VTdInfo The VTd engine context information.
+
+ @retval EFI_SUCCESS Prepare Vtd config success
+**/
+EFI_STATUS
+PrepareVtdConfig (
+ IN VTD_INFO *VTdInfo
+ )
+{
+ EFI_STATUS Status;
+ UINTN Index;
+ VTD_UNIT_INFO *VtdUnitInfo;
+ UINTN VtdUnitBaseAddress;
+
+ if (VTdInfo->RegsInfoBuffer == NULL) {
+ //
+ // NOTE(review): allocation failure is only ASSERTed here; on a release
+ // build a NULL buffer simply makes DumpVtdIfError() return early.
+ //
+ VTdInfo->RegsInfoBuffer = AllocateZeroPages (EFI_SIZE_TO_PAGES (sizeof (VTD_REGESTER_THIN_INFO) + sizeof (VTD_UINT128) * VTD_CAP_REG_NFR_MAX));
+ ASSERT (VTdInfo->RegsInfoBuffer != NULL);
+ }
+
+ for (Index = 0; Index < VTdInfo->VTdEngineCount; Index++) {
+ VtdUnitInfo = &VTdInfo->VtdUnitInfo[Index];
+ if (VtdUnitInfo->Done) {
+ continue;
+ }
+ VtdUnitBaseAddress = VtdUnitInfo->VtdUnitBaseAddress;
+ DEBUG ((DEBUG_INFO, "VTd Engine: 0x%08X\n", VtdUnitBaseAddress));
+
+ //
+ // Cache the engine's identification and capability registers.
+ //
+ VtdUnitInfo->VerReg.Uint32 = MmioRead32 (VtdUnitBaseAddress + R_VER_REG);
+ VtdUnitInfo->CapReg.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_CAP_REG);
+ VtdUnitInfo->ECapReg.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_ECAP_REG);
+ DEBUG ((DEBUG_INFO, " VER_REG : 0x%08X\n", VtdUnitInfo->VerReg.Uint32));
+ DEBUG ((DEBUG_INFO, " CAP_REG : 0x%016lX\n", VtdUnitInfo->CapReg.Uint64));
+ DEBUG ((DEBUG_INFO, " ECAP_REG : 0x%016lX\n", VtdUnitInfo->ECapReg.Uint64));
+
+ Status = VtdCheckUsing5LevelPaging (VTdInfo->HostAddressWidth, VtdUnitInfo->CapReg, &(VtdUnitInfo->Is5LevelPaging));
+ if (EFI_ERROR (Status)) {
+ DEBUG ((DEBUG_ERROR, "!!!! Page-table type 0x%X is not supported!!!!\n", VtdUnitInfo->CapReg.Bits.SAGAW));
+ return Status;
+ }
+
+ Status = PerpareCacheInvalidationInterface(&VTdInfo->VtdUnitInfo[Index]);
+ if (EFI_ERROR (Status)) {
+ return Status;
+ }
+ }
+
+ return EFI_SUCCESS;
+}
+
+/**
+ Dump VTd registers if there is error.
+
+ Scans every engine for pending faults: non-zero FSTS, the FECTL interrupt
+ flag, or any fault-record (FRCD) register with its F bit set. When a fault
+ is found, a thin register snapshot is dumped and logged, then the fault
+ records and fault status are cleared (write-one-to-clear).
+**/
+VOID
+DumpVtdIfError (
+ VOID
+ )
+{
+ VTD_INFO *VTdInfo;
+ UINTN Num;
+ UINTN VtdUnitBaseAddress;
+ UINT16 Index;
+ VTD_REGESTER_THIN_INFO *VtdRegInfo;
+ VTD_FRCD_REG FrcdReg;
+ VTD_CAP_REG CapReg;
+ UINT32 FstsReg32;
+ UINT32 FectlReg32;
+ BOOLEAN HasError;
+
+ VTdInfo = GetVTdInfoHob ();
+ if (VTdInfo == NULL) {
+ return;
+ }
+
+ VtdRegInfo = VTdInfo->RegsInfoBuffer;
+ if (VtdRegInfo == NULL) {
+ return;
+ }
+
+ for (Num = 0; Num < VTdInfo->VTdEngineCount; Num++) {
+ HasError = FALSE;
+ VtdUnitBaseAddress = VTdInfo->VtdUnitInfo[Num].VtdUnitBaseAddress;
+ FstsReg32 = MmioRead32 (VtdUnitBaseAddress + R_FSTS_REG);
+ if (FstsReg32 != 0) {
+ HasError = TRUE;
+ }
+ FectlReg32 = MmioRead32 (VtdUnitBaseAddress + R_FECTL_REG);
+ if ((FectlReg32 & BIT30) != 0) {
+ //
+ // BIT30 of FECTL_REG — presumably the Interrupt Pending (IP) bit;
+ // confirm against the VT-d specification.
+ //
+ HasError = TRUE;
+ }
+
+ CapReg.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_CAP_REG);
+ for (Index = 0; Index < (UINT16) CapReg.Bits.NFR + 1; Index++) {
+ FrcdReg.Uint64[0] = MmioRead64 (VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (Index * 16) + R_FRCD_REG));
+ FrcdReg.Uint64[1] = MmioRead64 (VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (Index * 16) + R_FRCD_REG + sizeof(UINT64)));
+ if (FrcdReg.Bits.F != 0) {
+ HasError = TRUE;
+ break;
+ }
+ }
+
+ if (HasError) {
+ DEBUG ((DEBUG_INFO, "\n#### ERROR ####\n"));
+
+ //
+ // Snapshot the registers and all fault records into the shared buffer.
+ //
+ VtdRegInfo->BaseAddress = VtdUnitBaseAddress;
+ VtdRegInfo->GstsReg = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+ VtdRegInfo->RtaddrReg = MmioRead64 (VtdUnitBaseAddress + R_RTADDR_REG);
+ VtdRegInfo->FstsReg = FstsReg32;
+ VtdRegInfo->FectlReg = FectlReg32;
+ VtdRegInfo->IqercdReg = MmioRead64 (VtdUnitBaseAddress + R_IQERCD_REG);
+
+ CapReg.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_CAP_REG);
+ for (Index = 0; Index < (UINT16) CapReg.Bits.NFR + 1; Index++) {
+ VtdRegInfo->FrcdReg[Index].Uint64Lo = MmioRead64 (VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (Index * 16) + R_FRCD_REG));
+ VtdRegInfo->FrcdReg[Index].Uint64Hi = MmioRead64 (VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (Index * 16) + R_FRCD_REG + sizeof(UINT64)));
+ }
+ VtdRegInfo->FrcdRegNum = Index;
+
+ DEBUG ((DEBUG_INFO, "\n#### ERROR ####\n"));
+
+ VtdLibDumpVtdRegsThin (NULL, NULL, VtdRegInfo);
+
+ DEBUG ((DEBUG_INFO, "#### ERROR ####\n\n"));
+
+ VTdLogAddDataEvent (VTDLOG_PEI_REGISTER, VTDLOG_REGISTER_THIN, VtdRegInfo, sizeof (VTD_REGESTER_THIN_INFO) + sizeof (VTD_UINT128) * (VtdRegInfo->FrcdRegNum - 1));
+
+ //
+ // Clear
+ //
+ for (Index = 0; Index < (UINT16) CapReg.Bits.NFR + 1; Index++) {
+ FrcdReg.Uint64[1] = MmioRead64 (VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (Index * 16) + R_FRCD_REG + sizeof(UINT64)));
+ if (FrcdReg.Bits.F != 0) {
+ //
+ // Software writes the value read from this field (F) to Clear it.
+ //
+ MmioWrite64 (VtdUnitBaseAddress + ((CapReg.Bits.FRO * 16) + (Index * 16) + R_FRCD_REG + sizeof(UINT64)), FrcdReg.Uint64[1]);
+ }
+ }
+ MmioWrite32 (VtdUnitBaseAddress + R_FSTS_REG, MmioRead32 (VtdUnitBaseAddress + R_FSTS_REG));
+ }
+ }
+}
\ No newline at end of file
diff --git a/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/TranslationTable.c b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/TranslationTable.c
new file mode 100644
index 000000000..03a4544a0
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Feature/VTd/IntelVTdCorePei/TranslationTable.c
@@ -0,0 +1,926 @@
+/** @file
+
+ Copyright (c) 2020 - 2021, Intel Corporation. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Uefi.h>
+#include <PiPei.h>
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/IoLib.h>
+#include <Library/DebugLib.h>
+#include <Library/PeiServicesLib.h>
+#include <Library/HobLib.h>
+#include <IndustryStandard/Vtd.h>
+#include <Ppi/IoMmu.h>
+#include <Ppi/VtdInfo.h>
+#include <Ppi/MemoryDiscovered.h>
+#include <Ppi/EndOfPeiPhase.h>
+#include <Guid/VtdPmrInfoHob.h>
+#include <Library/CacheMaintenanceLib.h>
+#include <Guid/VtdLogDataHob.h>
+#include "IntelVTdCorePei.h"
+
+#define ALIGN_VALUE_UP(Value, Alignment) (((Value) + (Alignment) - 1) & (~((Alignment) - 1)))
+#define ALIGN_VALUE_LOW(Value, Alignment) ((Value) & (~((Alignment) - 1)))
+
+/**
+  Allocate a number of 4KB pages and zero-fill them.
+
+  @param[in] Pages              The number of pages to allocate.
+
+  @return The base address of the allocated, zeroed pages.
+  @retval NULL                  Not enough resources to allocate the pages.
+**/
+VOID *
+EFIAPI
+AllocateZeroPages (
+  IN UINTN Pages
+  )
+{
+  VOID *PageAddress;
+
+  PageAddress = AllocatePages (Pages);
+  if (PageAddress != NULL) {
+    ZeroMem (PageAddress, EFI_PAGES_TO_SIZE (Pages));
+  }
+  return PageAddress;
+}
+
+/**
+  Program the Read/Write permission bits of a second level paging entry
+  from the requested IOMMU access mask.
+
+  @param[in] PtEntry            The second level paging entry to update.
+  @param[in] IoMmuAccess        The IOMMU access (EDKII_IOMMU_ACCESS_READ/WRITE bits).
+**/
+VOID
+SetSecondLevelPagingEntryAttribute (
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY *PtEntry,
+  IN UINT64 IoMmuAccess
+  )
+{
+  //
+  // The two permission bits are independent; set each from its access flag.
+  //
+  PtEntry->Bits.Write = ((IoMmuAccess & EDKII_IOMMU_ACCESS_WRITE) != 0);
+  PtEntry->Bits.Read  = ((IoMmuAccess & EDKII_IOMMU_ACCESS_READ) != 0);
+  DEBUG ((DEBUG_VERBOSE, "SetSecondLevelPagingEntryAttribute - 0x%x - 0x%x\n", PtEntry, IoMmuAccess));
+}
+
+/**
+  Create (or extend) a second level paging entry table that maps
+  [MemoryBase, MemoryLimit) with 2MB pages and the given access.
+
+  @param[in] VtdUnitInfo            The VTd engine unit information.
+  @param[in] SecondLevelPagingEntry The existing top level table, or NULL to
+                                    allocate a new one.
+  @param[in] MemoryBase             The base of the memory.
+  @param[in] MemoryLimit            The limit of the memory.
+  @param[in] IoMmuAccess            The IOMMU access.
+
+  @return The second level paging entry.
+  @retval NULL                      MemoryLimit is 0 or a page allocation failed.
+**/
+VTD_SECOND_LEVEL_PAGING_ENTRY *
+CreateSecondLevelPagingEntryTable (
+  IN VTD_UNIT_INFO *VtdUnitInfo,
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY *SecondLevelPagingEntry,
+  IN UINT64 MemoryBase,
+  IN UINT64 MemoryLimit,
+  IN UINT64 IoMmuAccess
+  )
+{
+  UINTN Index5;
+  UINTN Index4;
+  UINTN Index3;
+  UINTN Index2;
+  UINTN Lvl5Start;
+  UINTN Lvl5End;
+  UINTN Lvl4PagesStart;
+  UINTN Lvl4PagesEnd;
+  UINTN Lvl4Start;
+  UINTN Lvl4End;
+  UINTN Lvl3Start;
+  UINTN Lvl3End;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl5PtEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl4PtEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl3PtEntry;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl2PtEntry;
+  UINT64 BaseAddress;
+  UINT64 EndAddress;
+  BOOLEAN Is5LevelPaging;
+
+  if (MemoryLimit == 0) {
+    return NULL;
+  }
+
+  Lvl4PagesStart = 0;
+  Lvl4PagesEnd = 0;
+  Lvl4PtEntry = NULL;
+  Lvl5PtEntry = NULL;
+
+  //
+  // The mapping granularity is 2MB, so round the range out to 2MB boundaries.
+  //
+  BaseAddress = ALIGN_VALUE_LOW (MemoryBase, SIZE_2MB);
+  EndAddress = ALIGN_VALUE_UP (MemoryLimit, SIZE_2MB);
+  DEBUG ((DEBUG_INFO, "CreateSecondLevelPagingEntryTable: BaseAddress - 0x%016lx, EndAddress - 0x%016lx\n", BaseAddress, EndAddress));
+
+  if (SecondLevelPagingEntry == NULL) {
+    SecondLevelPagingEntry = AllocateZeroPages (1);
+    if (SecondLevelPagingEntry == NULL) {
+      DEBUG ((DEBUG_ERROR, "Could not Alloc LVL4 or LVL5 PT. \n"));
+      return NULL;
+    }
+    FlushPageTableMemory (VtdUnitInfo, (UINTN) SecondLevelPagingEntry, EFI_PAGES_TO_SIZE (1));
+  }
+
+  DEBUG ((DEBUG_INFO, " SecondLevelPagingEntry:0x%016lx\n", (UINT64) (UINTN) SecondLevelPagingEntry));
+  //
+  // If no access is needed, just create not present entry.
+  //
+  if (IoMmuAccess == 0) {
+    DEBUG ((DEBUG_INFO, " SecondLevelPagingEntry:0x%016lx Access 0\n", (UINT64) (UINTN) SecondLevelPagingEntry));
+    return SecondLevelPagingEntry;
+  }
+
+  Is5LevelPaging = VtdUnitInfo->Is5LevelPaging;
+
+  if (Is5LevelPaging) {
+    Lvl5Start = RShiftU64 (BaseAddress, 48) & 0x1FF;
+    Lvl5End = RShiftU64 (EndAddress - 1, 48) & 0x1FF;
+    DEBUG ((DEBUG_INFO, " Lvl5Start - 0x%x, Lvl5End - 0x%x\n", Lvl5Start, Lvl5End));
+
+    Lvl4Start = RShiftU64 (BaseAddress, 39) & 0x1FF;
+    Lvl4End = RShiftU64 (EndAddress - 1, 39) & 0x1FF;
+
+    //
+    // Track the LVL4 table window as a flat index across all LVL5 entries.
+    //
+    Lvl4PagesStart = (Lvl5Start<<9) | Lvl4Start;
+    Lvl4PagesEnd = (Lvl5End<<9) | Lvl4End;
+    DEBUG ((DEBUG_INFO, " Lvl4PagesStart - 0x%x, Lvl4PagesEnd - 0x%x\n", Lvl4PagesStart, Lvl4PagesEnd));
+
+    Lvl5PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *) SecondLevelPagingEntry;
+  } else {
+    Lvl5Start = RShiftU64 (BaseAddress, 48) & 0x1FF;
+    Lvl5End = Lvl5Start;
+
+    Lvl4Start = RShiftU64 (BaseAddress, 39) & 0x1FF;
+    Lvl4End = RShiftU64 (EndAddress - 1, 39) & 0x1FF;
+    DEBUG ((DEBUG_INFO, " Lvl4Start - 0x%x, Lvl4End - 0x%x\n", Lvl4Start, Lvl4End));
+
+    Lvl4PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *) SecondLevelPagingEntry;
+  }
+
+  for (Index5 = Lvl5Start; Index5 <= Lvl5End; Index5++) {
+    if (Is5LevelPaging) {
+      if (Lvl5PtEntry[Index5].Uint64 == 0) {
+        Lvl5PtEntry[Index5].Uint64 = (UINT64) (UINTN) AllocateZeroPages (1);
+        if (Lvl5PtEntry[Index5].Uint64 == 0) {
+          DEBUG ((DEBUG_ERROR, "!!!!!! ALLOCATE LVL4 PAGE FAIL (0x%x)!!!!!!\n", Index5));
+          ASSERT (FALSE);
+          return NULL;
+        }
+        FlushPageTableMemory (VtdUnitInfo, (UINTN) Lvl5PtEntry[Index5].Uint64, SIZE_4KB);
+        SetSecondLevelPagingEntryAttribute (&Lvl5PtEntry[Index5], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+      }
+      Lvl4Start = Lvl4PagesStart & 0x1FF;
+      if (((Index5+1)<<9) > Lvl4PagesEnd) {
+        //
+        // The range continues into the next LVL5 entry: fill this LVL4 table
+        // to its end and restart at index 0 on the next iteration.
+        // (Fixed: removed a stray double semicolon here.)
+        //
+        Lvl4End = SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY) - 1;
+        Lvl4PagesStart = (Index5+1)<<9;
+      } else {
+        Lvl4End = Lvl4PagesEnd & 0x1FF;
+      }
+      DEBUG ((DEBUG_INFO, " Lvl5(0x%x): Lvl4Start - 0x%x, Lvl4End - 0x%x\n", Index5, Lvl4Start, Lvl4End));
+      Lvl4PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *) (UINTN) VTD_64BITS_ADDRESS(Lvl5PtEntry[Index5].Bits.AddressLo, Lvl5PtEntry[Index5].Bits.AddressHi);
+    }
+
+    for (Index4 = Lvl4Start; Index4 <= Lvl4End; Index4++) {
+      if (Lvl4PtEntry[Index4].Uint64 == 0) {
+        Lvl4PtEntry[Index4].Uint64 = (UINT64) (UINTN) AllocateZeroPages (1);
+        if (Lvl4PtEntry[Index4].Uint64 == 0) {
+          DEBUG ((DEBUG_ERROR, "!!!!!! ALLOCATE LVL4 PAGE FAIL (0x%x)!!!!!!\n", Index4));
+          ASSERT(FALSE);
+          return NULL;
+        }
+        FlushPageTableMemory (VtdUnitInfo, (UINTN) Lvl4PtEntry[Index4].Uint64, SIZE_4KB);
+        SetSecondLevelPagingEntryAttribute (&Lvl4PtEntry[Index4], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+      }
+
+      Lvl3Start = RShiftU64 (BaseAddress, 30) & 0x1FF;
+      if (ALIGN_VALUE_LOW(BaseAddress + SIZE_1GB, SIZE_1GB) <= EndAddress) {
+        Lvl3End = SIZE_4KB / sizeof (VTD_SECOND_LEVEL_PAGING_ENTRY) - 1;
+      } else {
+        Lvl3End = RShiftU64 (EndAddress - 1, 30) & 0x1FF;
+      }
+      DEBUG ((DEBUG_INFO, " Lvl4(0x%x): Lvl3Start - 0x%x, Lvl3End - 0x%x\n", Index4, Lvl3Start, Lvl3End));
+
+      Lvl3PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *) (UINTN) VTD_64BITS_ADDRESS(Lvl4PtEntry[Index4].Bits.AddressLo, Lvl4PtEntry[Index4].Bits.AddressHi);
+      for (Index3 = Lvl3Start; Index3 <= Lvl3End; Index3++) {
+        if (Lvl3PtEntry[Index3].Uint64 == 0) {
+          Lvl3PtEntry[Index3].Uint64 = (UINT64) (UINTN) AllocateZeroPages (1);
+          if (Lvl3PtEntry[Index3].Uint64 == 0) {
+            DEBUG ((DEBUG_ERROR, "!!!!!! ALLOCATE LVL3 PAGE FAIL (0x%x, 0x%x)!!!!!!\n", Index4, Index3));
+            ASSERT(FALSE);
+            return NULL;
+          }
+          FlushPageTableMemory (VtdUnitInfo, (UINTN) Lvl3PtEntry[Index3].Uint64, SIZE_4KB);
+          SetSecondLevelPagingEntryAttribute (&Lvl3PtEntry[Index3], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+        }
+
+        //
+        // Fill the leaf (LVL2) table with 2MB pages until MemoryLimit is reached.
+        //
+        Lvl2PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *) (UINTN) VTD_64BITS_ADDRESS(Lvl3PtEntry[Index3].Bits.AddressLo, Lvl3PtEntry[Index3].Bits.AddressHi);
+        for (Index2 = 0; Index2 < SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY); Index2++) {
+          Lvl2PtEntry[Index2].Uint64 = BaseAddress;
+          SetSecondLevelPagingEntryAttribute (&Lvl2PtEntry[Index2], IoMmuAccess);
+          Lvl2PtEntry[Index2].Bits.PageSize = 1;
+          BaseAddress += SIZE_2MB;
+          if (BaseAddress >= MemoryLimit) {
+            break;
+          }
+        }
+        FlushPageTableMemory (VtdUnitInfo, (UINTN) Lvl2PtEntry, SIZE_4KB);
+        if (BaseAddress >= MemoryLimit) {
+          break;
+        }
+      }
+      FlushPageTableMemory (VtdUnitInfo, (UINTN) &Lvl3PtEntry[Lvl3Start], (UINTN) &Lvl3PtEntry[Lvl3End + 1] - (UINTN) &Lvl3PtEntry[Lvl3Start]);
+      if (BaseAddress >= MemoryLimit) {
+        break;
+      }
+    }
+    FlushPageTableMemory (VtdUnitInfo, (UINTN) &Lvl4PtEntry[Lvl4Start], (UINTN) &Lvl4PtEntry[Lvl4End + 1] - (UINTN) &Lvl4PtEntry[Lvl4Start]);
+  }
+  //
+  // Fixed: only flush the LVL5 range when 5-level paging is in use;
+  // in 4-level mode Lvl5PtEntry is NULL and flushing it would hit a bogus address.
+  //
+  if (Is5LevelPaging) {
+    FlushPageTableMemory (VtdUnitInfo, (UINTN) &Lvl5PtEntry[Lvl5Start], (UINTN) &Lvl5PtEntry[Lvl5End + 1] - (UINTN) &Lvl5PtEntry[Lvl5Start]);
+  }
+
+  DEBUG ((DEBUG_INFO, " SecondLevelPagingEntry:0x%016lx\n", (UINT64) (UINTN) SecondLevelPagingEntry));
+  return SecondLevelPagingEntry;
+}
+
+/**
+  Create context entry.
+
+  Allocates the root entry table plus one context entry table per root entry
+  as a single contiguous buffer, then points every present context entry at
+  the unit's fixed second level paging entry table.
+
+  @param[in] VtdUnitInfo            The VTd engine unit information.
+
+  @retval EFI_SUCCESS               The context entry is created.
+  @retval EFI_OUT_OF_RESOURCES      Not enough resource to create context entry.
+
+**/
+EFI_STATUS
+CreateContextEntry (
+  IN VTD_UNIT_INFO *VtdUnitInfo
+  )
+{
+  UINTN RootPages;
+  UINTN ContextPages;
+  UINTN EntryTablePages;
+  VOID *Buffer;
+  UINTN RootIndex;
+  UINTN ContextIndex;
+  VTD_ROOT_ENTRY *RootEntryBase;
+  VTD_ROOT_ENTRY *RootEntry;
+  VTD_CONTEXT_ENTRY *ContextEntryTable;
+  VTD_CONTEXT_ENTRY *ContextEntry;
+  VTD_SOURCE_ID SourceId;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *SecondLevelPagingEntry;
+  UINT64 Pt;
+
+  //
+  // Already created for this unit.
+  //
+  if (VtdUnitInfo->RootEntryTable != 0) {
+    return EFI_SUCCESS;
+  }
+
+  RootPages = EFI_SIZE_TO_PAGES (sizeof (VTD_ROOT_ENTRY) * VTD_ROOT_ENTRY_NUMBER);
+  ContextPages = EFI_SIZE_TO_PAGES (sizeof (VTD_CONTEXT_ENTRY) * VTD_CONTEXT_ENTRY_NUMBER);
+  EntryTablePages = RootPages + ContextPages * (VTD_ROOT_ENTRY_NUMBER);
+  Buffer = AllocateZeroPages (EntryTablePages);
+  if (Buffer == NULL) {
+    DEBUG ((DEBUG_ERROR, "Could not Alloc Root Entry Table.. \n"));
+    return EFI_OUT_OF_RESOURCES;
+  }
+
+  //
+  // Fixed: informational print, use DEBUG_INFO instead of DEBUG_ERROR.
+  //
+  DEBUG ((DEBUG_INFO, "RootEntryTable address - 0x%x\n", Buffer));
+  VtdUnitInfo->RootEntryTable = (UINTN) Buffer;
+  VtdUnitInfo->RootEntryTablePageSize = EntryTablePages;
+  RootEntryBase = (VTD_ROOT_ENTRY *) Buffer;
+  Buffer = (UINT8 *) Buffer + EFI_PAGES_TO_SIZE (RootPages);
+
+  if (VtdUnitInfo->FixedSecondLevelPagingEntry == 0) {
+    DEBUG ((DEBUG_ERROR, "FixedSecondLevelPagingEntry is empty\n"));
+    ASSERT(FALSE);
+  }
+
+  for (RootIndex = 0; RootIndex < VTD_ROOT_ENTRY_NUMBER; RootIndex++) {
+    SourceId.Index.RootIndex = (UINT8) RootIndex;
+
+    //
+    // Each root entry points at its own context entry table inside Buffer.
+    //
+    RootEntry = &RootEntryBase[SourceId.Index.RootIndex];
+    RootEntry->Bits.ContextTablePointerLo = (UINT32) RShiftU64 ((UINT64) (UINTN) Buffer, 12);
+    RootEntry->Bits.ContextTablePointerHi = (UINT32) RShiftU64 ((UINT64) (UINTN) Buffer, 32);
+    RootEntry->Bits.Present = 1;
+    Buffer = (UINT8 *)Buffer + EFI_PAGES_TO_SIZE (ContextPages);
+    ContextEntryTable = (VTD_CONTEXT_ENTRY *) (UINTN) VTD_64BITS_ADDRESS(RootEntry->Bits.ContextTablePointerLo, RootEntry->Bits.ContextTablePointerHi);
+
+    for (ContextIndex = 0; ContextIndex < VTD_CONTEXT_ENTRY_NUMBER; ContextIndex++) {
+      SourceId.Index.ContextIndex = (UINT8) ContextIndex;
+      ContextEntry = &ContextEntryTable[SourceId.Index.ContextIndex];
+
+      ContextEntry->Bits.TranslationType = 0;
+      ContextEntry->Bits.FaultProcessingDisable = 0;
+      ContextEntry->Bits.Present = 0;
+
+      //
+      // AddressWidth: 0x3 = 57-bit (5-level), 0x2 = 48-bit (4-level).
+      //
+      ContextEntry->Bits.AddressWidth = VtdUnitInfo->Is5LevelPaging ? 0x3 : 0x2;
+
+      if (VtdUnitInfo->FixedSecondLevelPagingEntry != 0) {
+        SecondLevelPagingEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *) VtdUnitInfo->FixedSecondLevelPagingEntry;
+        Pt = (UINT64)RShiftU64 ((UINT64) (UINTN) SecondLevelPagingEntry, 12);
+        ContextEntry->Bits.SecondLevelPageTranslationPointerLo = (UINT32) Pt;
+        ContextEntry->Bits.SecondLevelPageTranslationPointerHi = (UINT32) RShiftU64(Pt, 20);
+        ContextEntry->Bits.DomainIdentifier = ((1 << (UINT8)((UINTN)VtdUnitInfo->CapReg.Bits.ND * 2 + 4)) - 1);
+        ContextEntry->Bits.Present = 1;
+      }
+    }
+  }
+
+  FlushPageTableMemory (VtdUnitInfo, VtdUnitInfo->RootEntryTable, EFI_PAGES_TO_SIZE(EntryTablePages));
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Create extended context entry.
+
+  Same layout strategy as CreateContextEntry, using the extended (ECS-style)
+  root/context entry formats with lower and upper context table halves.
+
+  @param[in] VtdUnitInfo            The VTd engine unit information.
+
+  @retval EFI_SUCCESS               The extended context entry is created.
+  @retval EFI_OUT_OF_RESOURCES      Not enough resource to create extended context entry.
+**/
+EFI_STATUS
+CreateExtContextEntry (
+  IN VTD_UNIT_INFO *VtdUnitInfo
+  )
+{
+  UINTN RootPages;
+  UINTN ContextPages;
+  UINTN EntryTablePages;
+  VOID *Buffer;
+  UINTN RootIndex;
+  UINTN ContextIndex;
+  VTD_EXT_ROOT_ENTRY *ExtRootEntryBase;
+  VTD_EXT_ROOT_ENTRY *ExtRootEntry;
+  VTD_EXT_CONTEXT_ENTRY *ExtContextEntryTable;
+  VTD_EXT_CONTEXT_ENTRY *ExtContextEntry;
+  VTD_SOURCE_ID SourceId;
+  VTD_SECOND_LEVEL_PAGING_ENTRY *SecondLevelPagingEntry;
+  UINT64 Pt;
+
+  //
+  // Already created for this unit.
+  //
+  if (VtdUnitInfo->ExtRootEntryTable != 0) {
+    return EFI_SUCCESS;
+  }
+
+  RootPages = EFI_SIZE_TO_PAGES (sizeof (VTD_EXT_ROOT_ENTRY) * VTD_ROOT_ENTRY_NUMBER);
+  ContextPages = EFI_SIZE_TO_PAGES (sizeof (VTD_EXT_CONTEXT_ENTRY) * VTD_CONTEXT_ENTRY_NUMBER);
+  EntryTablePages = RootPages + ContextPages * (VTD_ROOT_ENTRY_NUMBER);
+  Buffer = AllocateZeroPages (EntryTablePages);
+  if (Buffer == NULL) {
+    //
+    // Fixed: allocation failure is an error, use DEBUG_ERROR (was DEBUG_INFO).
+    //
+    DEBUG ((DEBUG_ERROR, "Could not Alloc Root Entry Table !\n"));
+    return EFI_OUT_OF_RESOURCES;
+  }
+
+  //
+  // Fixed: informational print, use DEBUG_INFO (was DEBUG_ERROR).
+  //
+  DEBUG ((DEBUG_INFO, "ExtRootEntryTable address - 0x%x\n", Buffer));
+  VtdUnitInfo->ExtRootEntryTable = (UINTN) Buffer;
+  VtdUnitInfo->ExtRootEntryTablePageSize = EntryTablePages;
+  ExtRootEntryBase = (VTD_EXT_ROOT_ENTRY *) Buffer;
+  Buffer = (UINT8 *) Buffer + EFI_PAGES_TO_SIZE (RootPages);
+
+  if (VtdUnitInfo->FixedSecondLevelPagingEntry == 0) {
+    DEBUG ((DEBUG_ERROR, "FixedSecondLevelPagingEntry is empty\n"));
+    ASSERT(FALSE);
+  }
+
+  for (RootIndex = 0; RootIndex < VTD_ROOT_ENTRY_NUMBER; RootIndex++) {
+    SourceId.Index.RootIndex = (UINT8)RootIndex;
+
+    //
+    // Lower half points at the context page; upper half at the page after it.
+    //
+    ExtRootEntry = &ExtRootEntryBase[SourceId.Index.RootIndex];
+    ExtRootEntry->Bits.LowerContextTablePointerLo = (UINT32) RShiftU64 ((UINT64) (UINTN) Buffer, 12);
+    ExtRootEntry->Bits.LowerContextTablePointerHi = (UINT32) RShiftU64 ((UINT64) (UINTN) Buffer, 32);
+    ExtRootEntry->Bits.LowerPresent = 1;
+    ExtRootEntry->Bits.UpperContextTablePointerLo = (UINT32) RShiftU64 ((UINT64) (UINTN) Buffer, 12) + 1;
+    ExtRootEntry->Bits.UpperContextTablePointerHi = (UINT32) RShiftU64 (RShiftU64 ((UINT64) (UINTN) Buffer, 12) + 1, 20);
+    ExtRootEntry->Bits.UpperPresent = 1;
+    Buffer = (UINT8 *) Buffer + EFI_PAGES_TO_SIZE (ContextPages);
+    ExtContextEntryTable = (VTD_EXT_CONTEXT_ENTRY *) (UINTN) VTD_64BITS_ADDRESS (ExtRootEntry->Bits.LowerContextTablePointerLo, ExtRootEntry->Bits.LowerContextTablePointerHi);
+
+    for (ContextIndex = 0; ContextIndex < VTD_CONTEXT_ENTRY_NUMBER; ContextIndex++) {
+      SourceId.Index.ContextIndex = (UINT8) ContextIndex;
+      ExtContextEntry = &ExtContextEntryTable[SourceId.Index.ContextIndex];
+
+      ExtContextEntry->Bits.TranslationType = 0;
+      ExtContextEntry->Bits.FaultProcessingDisable = 0;
+      ExtContextEntry->Bits.Present = 0;
+
+      //
+      // AddressWidth: 0x3 = 57-bit (5-level), 0x2 = 48-bit (4-level).
+      //
+      ExtContextEntry->Bits.AddressWidth = VtdUnitInfo->Is5LevelPaging ? 0x3 : 0x2;
+
+      if (VtdUnitInfo->FixedSecondLevelPagingEntry != 0) {
+        SecondLevelPagingEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *) VtdUnitInfo->FixedSecondLevelPagingEntry;
+        Pt = (UINT64)RShiftU64 ((UINT64) (UINTN) SecondLevelPagingEntry, 12);
+
+        ExtContextEntry->Bits.SecondLevelPageTranslationPointerLo = (UINT32) Pt;
+        ExtContextEntry->Bits.SecondLevelPageTranslationPointerHi = (UINT32) RShiftU64(Pt, 20);
+        ExtContextEntry->Bits.DomainIdentifier = ((1 << (UINT8) ((UINTN) VtdUnitInfo->CapReg.Bits.ND * 2 + 4)) - 1);
+        ExtContextEntry->Bits.Present = 1;
+      }
+    }
+  }
+
+  FlushPageTableMemory (VtdUnitInfo, VtdUnitInfo->ExtRootEntryTable, EFI_PAGES_TO_SIZE(EntryTablePages));
+
+  return EFI_SUCCESS;
+}
+
+//
+// Second level (second-stage) paging entry bit fields.
+//
+#define VTD_PG_R BIT0
+#define VTD_PG_W BIT1
+#define VTD_PG_X BIT2
+#define VTD_PG_EMT (BIT3 | BIT4 | BIT5)
+#define VTD_PG_TM (BIT62)
+
+// Page-size bit: set on a non-leaf level entry to make it a large-page leaf.
+#define VTD_PG_PS BIT7
+
+// Attribute bits copied from a large page into its split-out smaller pages.
+// NOTE(review): "PROGATE" is a typo for "PROPAGATE"; name kept because other
+// code in this file references it.
+#define PAGE_PROGATE_BITS (VTD_PG_TM | VTD_PG_EMT | VTD_PG_W | VTD_PG_R)
+
+// Offset masks within a 4KB / 2MB / 1GB page.
+#define PAGING_4K_MASK 0xFFF
+#define PAGING_2M_MASK 0x1FFFFF
+#define PAGING_1G_MASK 0x3FFFFFFF
+
+// 9-bit index mask for one level of the page table walk.
+#define PAGING_VTD_INDEX_MASK 0x1FF
+
+// Address field masks for each page size (bits 51:12 / 51:21 / 51:30).
+#define PAGING_4K_ADDRESS_MASK_64 0x000FFFFFFFFFF000ull
+#define PAGING_2M_ADDRESS_MASK_64 0x000FFFFFFFE00000ull
+#define PAGING_1G_ADDRESS_MASK_64 0x000FFFFFC0000000ull
+
+// Granularity of a paging entry: none (invalid/absent), 4KB, 2MB, or 1GB.
+typedef enum {
+  PageNone,
+  Page4K,
+  Page2M,
+  Page1G,
+} PAGE_ATTRIBUTE;
+
+// Maps a PAGE_ATTRIBUTE to its page length and address-field mask.
+typedef struct {
+  PAGE_ATTRIBUTE Attribute;
+  UINT64 Length;
+  UINT64 AddressMask;
+} PAGE_ATTRIBUTE_TABLE;
+
+// Lookup table used by PageAttributeToLength / page splitting code.
+PAGE_ATTRIBUTE_TABLE mPageAttributeTable[] = {
+  {Page4K, SIZE_4KB, PAGING_4K_ADDRESS_MASK_64},
+  {Page2M, SIZE_2MB, PAGING_2M_ADDRESS_MASK_64},
+  {Page1G, SIZE_1GB, PAGING_1G_ADDRESS_MASK_64},
+};
+
+/**
+  Look up the page size, in bytes, for a given page attribute.
+
+  @param[in] PageAttribute      The page attribute (Page4K/Page2M/Page1G).
+
+  @return The page length in bytes, or 0 when the attribute is unknown.
+**/
+UINTN
+PageAttributeToLength (
+  IN PAGE_ATTRIBUTE PageAttribute
+  )
+{
+  UINTN TableIndex;
+
+  for (TableIndex = 0; TableIndex < sizeof (mPageAttributeTable) / sizeof (mPageAttributeTable[0]); TableIndex++) {
+    if (mPageAttributeTable[TableIndex].Attribute == PageAttribute) {
+      return (UINTN) mPageAttributeTable[TableIndex].Length;
+    }
+  }
+  //
+  // PageNone and any unexpected value have no length.
+  //
+  return 0;
+}
+
+/**
+  Return page table entry to match the address.
+
+  Walks (and allocates on demand) the second level page table down to the
+  entry that maps Address; new leaf entries are created as non-present 2MB
+  pages.
+
+  @param[in]  VtdUnitInfo            The VTd engine unit information.
+  @param[in]  SecondLevelPagingEntry The second level paging entry in VTd table for the device.
+  @param[in]  Address                The address to be checked.
+  @param[out] PageAttribute          The page attribute of the page entry.
+
+  @return The page entry, or NULL on allocation failure / unmapped address.
+**/
+VOID *
+GetSecondLevelPageTableEntry (
+  IN VTD_UNIT_INFO *VtdUnitInfo,
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY *SecondLevelPagingEntry,
+  IN PHYSICAL_ADDRESS Address,
+  OUT PAGE_ATTRIBUTE *PageAttribute
+  )
+{
+  UINTN Index1;
+  UINTN Index2;
+  UINTN Index3;
+  UINTN Index4;
+  UINTN Index5;
+  UINT64 *L1PageTable;
+  UINT64 *L2PageTable;
+  UINT64 *L3PageTable;
+  UINT64 *L4PageTable;
+  UINT64 *L5PageTable;
+  BOOLEAN Is5LevelPaging;
+
+  //
+  // 9-bit table index at each level of the walk.
+  //
+  Index5 = ((UINTN) RShiftU64 (Address, 48)) & PAGING_VTD_INDEX_MASK;
+  Index4 = ((UINTN) RShiftU64 (Address, 39)) & PAGING_VTD_INDEX_MASK;
+  Index3 = ((UINTN) Address >> 30) & PAGING_VTD_INDEX_MASK;
+  Index2 = ((UINTN) Address >> 21) & PAGING_VTD_INDEX_MASK;
+  Index1 = ((UINTN) Address >> 12) & PAGING_VTD_INDEX_MASK;
+
+  Is5LevelPaging = VtdUnitInfo->Is5LevelPaging;
+
+  if (Is5LevelPaging) {
+    L5PageTable = (UINT64 *) SecondLevelPagingEntry;
+    if (L5PageTable[Index5] == 0) {
+      L5PageTable[Index5] = (UINT64) (UINTN) AllocateZeroPages (1);
+      if (L5PageTable[Index5] == 0) {
+        //
+        // Fixed: report Index5 (was Index4) for the LVL5 failure.
+        //
+        DEBUG ((DEBUG_ERROR, "!!!!!! ALLOCATE LVL5 PAGE FAIL (0x%x)!!!!!!\n", Index5));
+        ASSERT(FALSE);
+        *PageAttribute = PageNone;
+        return NULL;
+      }
+      FlushPageTableMemory (VtdUnitInfo, (UINTN) L5PageTable[Index5], SIZE_4KB);
+      SetSecondLevelPagingEntryAttribute ((VTD_SECOND_LEVEL_PAGING_ENTRY *) &L5PageTable[Index5], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+      FlushPageTableMemory (VtdUnitInfo, (UINTN) &L5PageTable[Index5], sizeof(L5PageTable[Index5]));
+    }
+    L4PageTable = (UINT64 *) (UINTN) (L5PageTable[Index5] & PAGING_4K_ADDRESS_MASK_64);
+  } else {
+    L4PageTable = (UINT64 *)SecondLevelPagingEntry;
+  }
+
+  if (L4PageTable[Index4] == 0) {
+    L4PageTable[Index4] = (UINT64) (UINTN) AllocateZeroPages (1);
+    if (L4PageTable[Index4] == 0) {
+      DEBUG ((DEBUG_ERROR, "!!!!!! ALLOCATE LVL4 PAGE FAIL (0x%x)!!!!!!\n", Index4));
+      ASSERT(FALSE);
+      *PageAttribute = PageNone;
+      return NULL;
+    }
+    FlushPageTableMemory (VtdUnitInfo, (UINTN) L4PageTable[Index4], SIZE_4KB);
+    SetSecondLevelPagingEntryAttribute ((VTD_SECOND_LEVEL_PAGING_ENTRY *) &L4PageTable[Index4], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+    FlushPageTableMemory (VtdUnitInfo, (UINTN) &L4PageTable[Index4], sizeof(L4PageTable[Index4]));
+  }
+
+  L3PageTable = (UINT64 *) (UINTN) (L4PageTable[Index4] & PAGING_4K_ADDRESS_MASK_64);
+  if (L3PageTable[Index3] == 0) {
+    L3PageTable[Index3] = (UINT64) (UINTN) AllocateZeroPages (1);
+    if (L3PageTable[Index3] == 0) {
+      DEBUG ((DEBUG_ERROR, "!!!!!! ALLOCATE LVL3 PAGE FAIL (0x%x, 0x%x)!!!!!!\n", Index4, Index3));
+      ASSERT(FALSE);
+      *PageAttribute = PageNone;
+      return NULL;
+    }
+    FlushPageTableMemory (VtdUnitInfo, (UINTN) L3PageTable[Index3], SIZE_4KB);
+    SetSecondLevelPagingEntryAttribute ((VTD_SECOND_LEVEL_PAGING_ENTRY *) &L3PageTable[Index3], EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+    FlushPageTableMemory (VtdUnitInfo, (UINTN) &L3PageTable[Index3], sizeof (L3PageTable[Index3]));
+  }
+  if ((L3PageTable[Index3] & VTD_PG_PS) != 0) {
+    // 1G
+    *PageAttribute = Page1G;
+    return &L3PageTable[Index3];
+  }
+
+  L2PageTable = (UINT64 *) (UINTN) (L3PageTable[Index3] & PAGING_4K_ADDRESS_MASK_64);
+  if (L2PageTable[Index2] == 0) {
+    //
+    // Create a non-present 2MB leaf entry (access bits set later by caller).
+    //
+    L2PageTable[Index2] = Address & PAGING_2M_ADDRESS_MASK_64;
+    SetSecondLevelPagingEntryAttribute ((VTD_SECOND_LEVEL_PAGING_ENTRY *) &L2PageTable[Index2], 0);
+    L2PageTable[Index2] |= VTD_PG_PS;
+    FlushPageTableMemory (VtdUnitInfo, (UINTN) &L2PageTable[Index2], sizeof (L2PageTable[Index2]));
+  }
+  if ((L2PageTable[Index2] & VTD_PG_PS) != 0) {
+    // 2M
+    *PageAttribute = Page2M;
+    return &L2PageTable[Index2];
+  }
+
+  // 4k
+  L1PageTable = (UINT64 *) (UINTN) (L2PageTable[Index2] & PAGING_4K_ADDRESS_MASK_64);
+  if ((L1PageTable[Index1] == 0) && (Address != 0)) {
+    *PageAttribute = PageNone;
+    return NULL;
+  }
+  *PageAttribute = Page4K;
+  return &L1PageTable[Index1];
+}
+
+/**
+  Modify the access attribute of a second level page entry and report
+  whether the entry actually changed.
+
+  @param[in]  VtdUnitInfo       The VTd engine unit information.
+  @param[in]  PageEntry         The page entry to modify.
+  @param[in]  IoMmuAccess       The IOMMU access.
+  @param[out] IsModified        TRUE when the entry was changed, FALSE otherwise.
+**/
+VOID
+ConvertSecondLevelPageEntryAttribute (
+  IN VTD_UNIT_INFO *VtdUnitInfo,
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY *PageEntry,
+  IN UINT64 IoMmuAccess,
+  OUT BOOLEAN *IsModified
+  )
+{
+  UINT64 OriginalEntry;
+  UINT64 UpdatedEntry;
+
+  OriginalEntry = PageEntry->Uint64;
+  SetSecondLevelPagingEntryAttribute (PageEntry, IoMmuAccess);
+  FlushPageTableMemory (VtdUnitInfo, (UINTN) PageEntry, sizeof(*PageEntry));
+  UpdatedEntry = PageEntry->Uint64;
+
+  *IsModified = (BOOLEAN) (OriginalEntry != UpdatedEntry);
+  if (*IsModified) {
+    DEBUG ((DEBUG_VERBOSE, "ConvertSecondLevelPageEntryAttribute 0x%lx", OriginalEntry));
+    DEBUG ((DEBUG_VERBOSE, "->0x%lx\n", UpdatedEntry));
+  }
+}
+
+/**
+  Decide whether a page entry must be split to cover a request.
+
+  @param[in] BaseAddress        The base address to be checked.
+  @param[in] Length             The length to be checked.
+  @param[in] PageAttribute      The page attribute of the current page entry.
+
+  @retval PageNone              The entry covers the request; no split needed.
+  @retval Page4K                The entry must be split down to 4KB pages.
+  @retval Page2M                The entry must be split down to 2MB pages.
+**/
+PAGE_ATTRIBUTE
+NeedSplitPage (
+  IN PHYSICAL_ADDRESS BaseAddress,
+  IN UINT64 Length,
+  IN PAGE_ATTRIBUTE PageAttribute
+  )
+{
+  UINT64 EntrySize;
+
+  EntrySize = PageAttributeToLength (PageAttribute);
+
+  //
+  // Aligned to the entry and fully covering it: keep the entry as-is.
+  //
+  if (((BaseAddress & (EntrySize - 1)) == 0) && (Length >= EntrySize)) {
+    return PageNone;
+  }
+
+  //
+  // Shorter than 2MB or not 2MB aligned: must go all the way to 4KB pages.
+  //
+  if ((Length < SIZE_2MB) || ((BaseAddress & PAGING_2M_MASK) != 0)) {
+    return Page4K;
+  }
+
+  return Page2M;
+}
+
+/**
+  Split one large page entry into smaller page entries.
+
+  The replacement table is fully built and flushed BEFORE the parent entry is
+  repointed, so the hardware never observes a partially-initialized table.
+
+  @param[in] VtdUnitInfo        The VTd engine unit information.
+  @param[in] PageEntry          The page entry to be split.
+  @param[in] PageAttribute      The page attribute of the page entry.
+  @param[in] SplitAttribute     How to split the page entry.
+
+  @retval RETURN_SUCCESS            The page entry is split.
+  @retval RETURN_UNSUPPORTED        The page entry does not support being split.
+  @retval RETURN_OUT_OF_RESOURCES   No resource to split the page entry.
+**/
+RETURN_STATUS
+SplitSecondLevelPage (
+  IN VTD_UNIT_INFO *VtdUnitInfo,
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY *PageEntry,
+  IN PAGE_ATTRIBUTE PageAttribute,
+  IN PAGE_ATTRIBUTE SplitAttribute
+  )
+{
+  UINT64 BaseAddress;
+  UINT64 *NewPageEntry;
+  UINTN Index;
+
+  // Only large pages (2MB/1GB) can be split.
+  ASSERT (PageAttribute == Page2M || PageAttribute == Page1G);
+
+  if (PageAttribute == Page2M) {
+    //
+    // Split 2M to 4K
+    //
+    ASSERT (SplitAttribute == Page4K);
+    if (SplitAttribute == Page4K) {
+      NewPageEntry = AllocateZeroPages (1);
+      DEBUG ((DEBUG_INFO, "Split - 0x%x\n", NewPageEntry));
+      if (NewPageEntry == NULL) {
+        return RETURN_OUT_OF_RESOURCES;
+      }
+      // Identity-fill 512 4KB entries, carrying over the original's
+      // propagated attribute bits (PAGE_PROGATE_BITS).
+      BaseAddress = PageEntry->Uint64 & PAGING_2M_ADDRESS_MASK_64;
+      for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
+        NewPageEntry[Index] = (BaseAddress + SIZE_4KB * Index) | (PageEntry->Uint64 & PAGE_PROGATE_BITS);
+      }
+      FlushPageTableMemory (VtdUnitInfo, (UINTN)NewPageEntry, SIZE_4KB);
+
+      // Repoint the parent entry only after the new table is flushed.
+      PageEntry->Uint64 = (UINT64)(UINTN)NewPageEntry;
+      SetSecondLevelPagingEntryAttribute (PageEntry, EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+      FlushPageTableMemory (VtdUnitInfo, (UINTN)PageEntry, sizeof(*PageEntry));
+      return RETURN_SUCCESS;
+    } else {
+      return RETURN_UNSUPPORTED;
+    }
+  } else if (PageAttribute == Page1G) {
+    //
+    // Split 1G to 2M
+    // No need support 1G->4K directly, we should use 1G->2M, then 2M->4K to get more compact page table.
+    //
+    ASSERT (SplitAttribute == Page2M || SplitAttribute == Page4K);
+    if ((SplitAttribute == Page2M || SplitAttribute == Page4K)) {
+      NewPageEntry = AllocateZeroPages (1);
+      DEBUG ((DEBUG_INFO, "Split - 0x%x\n", NewPageEntry));
+      if (NewPageEntry == NULL) {
+        return RETURN_OUT_OF_RESOURCES;
+      }
+      // Identity-fill 512 2MB entries; VTD_PG_PS marks each as a large-page leaf.
+      BaseAddress = PageEntry->Uint64 & PAGING_1G_ADDRESS_MASK_64;
+      for (Index = 0; Index < SIZE_4KB / sizeof(UINT64); Index++) {
+        NewPageEntry[Index] = (BaseAddress + SIZE_2MB * Index) | VTD_PG_PS | (PageEntry->Uint64 & PAGE_PROGATE_BITS);
+      }
+      FlushPageTableMemory (VtdUnitInfo, (UINTN)NewPageEntry, SIZE_4KB);
+
+      // Repoint the parent entry only after the new table is flushed.
+      PageEntry->Uint64 = (UINT64)(UINTN)NewPageEntry;
+      SetSecondLevelPagingEntryAttribute (PageEntry, EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE);
+      FlushPageTableMemory (VtdUnitInfo, (UINTN)PageEntry, sizeof(*PageEntry));
+      return RETURN_SUCCESS;
+    } else {
+      return RETURN_UNSUPPORTED;
+    }
+  } else {
+    return RETURN_UNSUPPORTED;
+  }
+}
+
+/**
+  Set VTd attribute for a system memory range on second level page entries.
+
+  @param[in] VtdUnitInfo            The VTd engine unit information.
+  @param[in] SecondLevelPagingEntry The second level paging entry in VTd table for the device.
+  @param[in] BaseAddress            The base of device memory address to be used as the DMA memory.
+  @param[in] Length                 The length of device memory address to be used as the DMA memory.
+  @param[in] IoMmuAccess            The IOMMU access.
+
+  @retval EFI_SUCCESS           The IoMmuAccess is set for the memory range specified by BaseAddress and Length.
+  @retval EFI_UNSUPPORTED       BaseAddress or Length is not 4KB aligned.
+  @retval EFI_UNSUPPORTED       The page table walk or a page split failed for
+                                the memory range specified by BaseAddress and Length.
+**/
+EFI_STATUS
+SetSecondLevelPagingAttribute (
+  IN VTD_UNIT_INFO *VtdUnitInfo,
+  IN VTD_SECOND_LEVEL_PAGING_ENTRY *SecondLevelPagingEntry,
+  IN UINT64 BaseAddress,
+  IN UINT64 Length,
+  IN UINT64 IoMmuAccess
+  )
+{
+  VTD_SECOND_LEVEL_PAGING_ENTRY *PageEntry;
+  PAGE_ATTRIBUTE PageAttribute;
+  UINTN PageEntryLength;
+  PAGE_ATTRIBUTE SplitAttribute;
+  EFI_STATUS Status;
+  BOOLEAN IsEntryModified;
+
+  DEBUG ((DEBUG_INFO, "SetSecondLevelPagingAttribute (0x%016lx - 0x%016lx : %x) \n", BaseAddress, Length, IoMmuAccess));
+  DEBUG ((DEBUG_INFO, " SecondLevelPagingEntry Base - 0x%x\n", SecondLevelPagingEntry));
+
+  //
+  // Both the base and the length must be 4KB aligned.
+  //
+  if (BaseAddress != ALIGN_VALUE(BaseAddress, SIZE_4KB)) {
+    DEBUG ((DEBUG_ERROR, "SetSecondLevelPagingAttribute - Invalid Alignment\n"));
+    return EFI_UNSUPPORTED;
+  }
+  if (Length != ALIGN_VALUE(Length, SIZE_4KB)) {
+    DEBUG ((DEBUG_ERROR, "SetSecondLevelPagingAttribute - Invalid Alignment\n"));
+    return EFI_UNSUPPORTED;
+  }
+
+  while (Length != 0) {
+    PageEntry = GetSecondLevelPageTableEntry (VtdUnitInfo, SecondLevelPagingEntry, BaseAddress, &PageAttribute);
+    if (PageEntry == NULL) {
+      DEBUG ((DEBUG_ERROR, "PageEntry - NULL\n"));
+      //
+      // Fixed: return EFI_* status consistently (same value as RETURN_UNSUPPORTED).
+      //
+      return EFI_UNSUPPORTED;
+    }
+    PageEntryLength = PageAttributeToLength (PageAttribute);
+    SplitAttribute = NeedSplitPage (BaseAddress, Length, PageAttribute);
+    if (SplitAttribute == PageNone) {
+      ConvertSecondLevelPageEntryAttribute (VtdUnitInfo, PageEntry, IoMmuAccess, &IsEntryModified);
+      //
+      // Convert success, move to next
+      //
+      BaseAddress += PageEntryLength;
+      Length -= PageEntryLength;
+    } else {
+      Status = SplitSecondLevelPage (VtdUnitInfo, PageEntry, PageAttribute, SplitAttribute);
+      if (EFI_ERROR (Status)) {
+        DEBUG ((DEBUG_ERROR, "SplitSecondLevelPage - %r\n", Status));
+        return EFI_UNSUPPORTED;
+      }
+      //
+      // Just split current page
+      // Convert success in next around
+      //
+    }
+  }
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Create Fixed Second Level Paging Entry.
+
+  Identity-maps the low 4GB with no access, then opens the DMA buffer range
+  (described by the DMA_BUFFER_INFO HOB) for read/write.
+
+  @param[in] VtdUnitInfo            The VTd engine unit information.
+
+  @retval EFI_SUCCESS               Setup translation table successfully.
+  @retval EFI_NOT_FOUND             The DMA buffer information HOB is missing.
+  @retval EFI_OUT_OF_RESOURCES      Setup translation table fail.
+
+**/
+EFI_STATUS
+CreateFixedSecondLevelPagingEntry (
+  IN VTD_UNIT_INFO *VtdUnitInfo
+  )
+{
+  EFI_STATUS Status;
+  UINT64 IoMmuAccess;
+  UINT64 BaseAddress;
+  UINT64 Length;
+  VOID *Hob;
+  DMA_BUFFER_INFO *DmaBufferInfo;
+
+  //
+  // Already created for this unit.
+  //
+  if (VtdUnitInfo->FixedSecondLevelPagingEntry != 0) {
+    return EFI_SUCCESS;
+  }
+
+  VtdUnitInfo->FixedSecondLevelPagingEntry = (UINTN) CreateSecondLevelPagingEntryTable (VtdUnitInfo, NULL, 0, SIZE_4GB, 0);
+  if (VtdUnitInfo->FixedSecondLevelPagingEntry == 0) {
+    DEBUG ((DEBUG_ERROR, "FixedSecondLevelPagingEntry is empty\n"));
+    return EFI_OUT_OF_RESOURCES;
+  }
+
+  //
+  // Fixed: guard against a missing DMA buffer HOB before dereferencing it.
+  //
+  Hob = GetFirstGuidHob (&mDmaBufferInfoGuid);
+  if (Hob == NULL) {
+    DEBUG ((DEBUG_ERROR, "DmaBufferInfo Hob is not found\n"));
+    ASSERT (FALSE);
+    return EFI_NOT_FOUND;
+  }
+  DmaBufferInfo = GET_GUID_HOB_DATA (Hob);
+  BaseAddress = DmaBufferInfo->DmaBufferBase;
+  Length = DmaBufferInfo->DmaBufferSize;
+  IoMmuAccess = EDKII_IOMMU_ACCESS_READ | EDKII_IOMMU_ACCESS_WRITE;
+
+  DEBUG ((DEBUG_INFO, " BaseAddress = 0x%lx\n", BaseAddress));
+  DEBUG ((DEBUG_INFO, " Length = 0x%lx\n", Length));
+  DEBUG ((DEBUG_INFO, " IoMmuAccess = 0x%lx\n", IoMmuAccess));
+
+  Status = SetSecondLevelPagingAttribute (VtdUnitInfo, (VTD_SECOND_LEVEL_PAGING_ENTRY*) VtdUnitInfo->FixedSecondLevelPagingEntry, BaseAddress, Length, IoMmuAccess);
+
+  return Status;
+}
+/**
+  Setup VTd translation table.
+
+  For each VTd engine that is not already done: build the fixed second level
+  paging table, then create the (scalable / extended / legacy) context
+  entries according to the engine's extended capability bits.
+
+  @param[in] VTdInfo            The VTd engine context information.
+
+  @retval EFI_SUCCESS           Setup translation table successfully.
+  @retval EFI_UNSUPPORTED       The engine reports an unsupported capability combination.
+  @retval EFI_OUT_OF_RESOURCES  Setup translation table fail.
+
+**/
+EFI_STATUS
+SetupTranslationTable (
+  IN VTD_INFO *VTdInfo
+  )
+{
+  EFI_STATUS Status;
+  UINTN Index;
+  VTD_UNIT_INFO *VtdUnitInfo;
+
+  for (Index = 0; Index < VTdInfo->VTdEngineCount; Index++) {
+    VtdUnitInfo = &VTdInfo->VtdUnitInfo[Index];
+    if (VtdUnitInfo->Done) {
+      continue;
+    }
+
+    Status = CreateFixedSecondLevelPagingEntry (VtdUnitInfo);
+    if (EFI_ERROR (Status)) {
+      DEBUG ((DEBUG_INFO, "CreateFixedSecondLevelPagingEntry failed - %r\n", Status));
+      return Status;
+    }
+
+    if (VtdUnitInfo->ECapReg.Bits.SMTS) {
+      // Scalable Mode Translation Support: bit 24 must be clear here.
+      if (VtdUnitInfo->ECapReg.Bits.DEP_24) {
+        DEBUG ((DEBUG_ERROR,"ECapReg.bit24 is not zero\n"));
+        ASSERT(FALSE);
+        Status = EFI_UNSUPPORTED;
+      } else {
+        Status = CreateContextEntry (VtdUnitInfo);
+      }
+    } else {
+      if (VtdUnitInfo->ECapReg.Bits.DEP_24) {
+        //
+        // To be compatible with previous VTd engines:
+        // bit 24 was ECS (Extended Context Support) before being deprecated.
+        //
+        Status = CreateExtContextEntry (VtdUnitInfo);
+      } else {
+        Status = CreateContextEntry (VtdUnitInfo);
+      }
+    }
+
+    if (EFI_ERROR (Status)) {
+      return Status;
+    }
+  }
+  return EFI_SUCCESS;
+}
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Include/Guid/VtdLogDataHob.h b/Silicon/Intel/IntelSiliconPkg/Include/Guid/VtdLogDataHob.h
new file mode 100644
index 000000000..7863a257a
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Include/Guid/VtdLogDataHob.h
@@ -0,0 +1,151 @@
+/** @file
+  The definition for VTD Log Data Hob.
+
+  Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+  SPDX-License-Identifier: BSD-2-Clause-Patent
+**/
+
+
+#ifndef _VTD_LOG_DATA_HOB_H_
+#define _VTD_LOG_DATA_HOB_H_
+
+#include <IndustryStandard/Vtd.h>
+
+//
+// Convert an event type ID (VTDLOG_EVENT_TYPE) into a UINT64 bit mask,
+// used to test/filter individual log types.
+//
+#define VTDLOG_LOG_TYPE(_id_) ((UINT64) 1 << (_id_))
+
+typedef enum {
+  VTDLOG_PEI_BASIC                        = 0,  // Start ID for PEI basic log
+  VTDLOG_PEI_PRE_MEM_DMA_PROTECT          = 1,  // PRE-MEM phase
+  VTDLOG_PEI_PMR_LOW_MEMORY_RANGE         = 2,
+  VTDLOG_PEI_PMR_HIGH_MEMORY_RANGE        = 3,
+  VTDLOG_PEI_PROTECT_MEMORY_RANGE         = 4,
+  VTDLOG_PEI_POST_MEM_ENABLE_DMA_PROTECT  = 5,
+  VTDLOG_PEI_POST_MEM_DISABLE_DMA_PROTECT = 6,
+  VTDLOG_PEI_QUEUED_INVALIDATION          = 7,
+  VTDLOG_PEI_REGISTER                     = 8,
+  VTDLOG_PEI_VTD_ERROR                    = 9,
+
+  VTDLOG_PEI_ADVANCED                     = 16, // Start ID for PEI advanced log
+  VTDLOG_PEI_PPI_ALLOC_BUFFER             = 17,
+  VTDLOG_PEI_PPI_MAP                      = 18,
+
+  VTDLOG_DXE_BASIC                        = 24, // Start ID for DXE basic log
+  VTDLOG_DXE_DMAR_TABLE                   = 25,
+  VTDLOG_DXE_SETUP_VTD                    = 26,
+  VTDLOG_DXE_PCI_DEVICE                   = 27,
+  VTDLOG_DXE_REGISTER                     = 28,
+  VTDLOG_DXE_ENABLE_DMAR                  = 29,
+  VTDLOG_DXE_DISABLE_DMAR                 = 30,
+  VTDLOG_DXE_DISABLE_PMR                  = 31,
+  VTDLOG_DXE_INSTALL_IOMMU_PROTOCOL       = 32,
+  VTDLOG_DXE_QUEUED_INVALIDATION          = 33,
+
+  VTDLOG_DXE_ADVANCED                     = 44, // Start ID for DXE advanced log
+  VTDLOG_DXE_IOMMU_ALLOC_BUFFER           = 45,
+  VTDLOG_DXE_IOMMU_FREE_BUFFER            = 46,
+  VTDLOG_DXE_IOMMU_MAP                    = 47,
+  VTDLOG_DXE_IOMMU_UNMAP                  = 48,
+  VTDLOG_DXE_IOMMU_SET_ATTRIBUTE          = 49,
+  VTDLOG_DXE_ROOT_TABLE                   = 50,
+} VTDLOG_EVENT_TYPE;
+
+//
+// Max number of VTd engine BARs recorded in the PEI pre-memory phase.
+//
+#define VTD_LOG_PEI_PRE_MEM_BAR_MAX 64
+
+//
+// Code of VTDLOG_PEI_BASIC / VTDLOG_DXE_BASIC
+//
+#define VTD_LOG_ERROR_BUFFER_FULL (1<<0)
+
+//
+// Code of VTDLOG_PEI_PRE_MEM_DMA_PROTECT (VTDLOG_PEI_PRE_MEM_INFO.Mode)
+//
+#define VTD_LOG_PEI_PRE_MEM_NOT_USED 0
+#define VTD_LOG_PEI_PRE_MEM_DISABLE 1
+#define VTD_LOG_PEI_PRE_MEM_ADM 2
+#define VTD_LOG_PEI_PRE_MEM_TE 3
+#define VTD_LOG_PEI_PRE_MEM_PMR 4
+
+//
+// Code of VTDLOG_PEI_QUEUED_INVALIDATION
+//
+#define VTD_LOG_QI_DISABLE 0
+#define VTD_LOG_QI_ENABLE 1
+#define VTD_LOG_QI_ERROR_OUT_OF_RESOURCES 2
+
+//
+// Code of VTDLOG_PEI_VTD_ERROR
+//
+#define VTD_LOG_PEI_VTD_ERROR_PPI_ALLOC 1
+#define VTD_LOG_PEI_VTD_ERROR_PPI_MAP 2
+
+// Code of VTDLOG_PEI_REGISTER / VTDLOG_DXE_REGISTER
+#define VTDLOG_REGISTER_ALL 0
+#define VTDLOG_REGISTER_THIN 1
+#define VTDLOG_REGISTER_QI 2
+
+#pragma pack(1)
+
+//
+// Item head, common to every log event.
+//
+typedef struct {
+  UINT32 DataSize;   // Total size of this event in bytes
+  UINT64 LogType;    // Bit mask built with VTDLOG_LOG_TYPE (VTDLOG_EVENT_TYPE)
+  UINT64 Timestamp;  // Event timestamp
+}VTDLOG_EVENT_HEADER;
+
+//
+// Struct for type = VTDLOG_PEI_REGISTER
+//               VTDLOG_DXE_REGISTER
+//               VTDLOG_DXE_DMAR_TABLE
+//               VTDLOG_DXE_IOMMU_SET_ATTRIBUTE
+//               VTDLOG_DXE_PCI_DEVICE
+//               VTDLOG_DXE_ROOT_TABLE
+// Data is a variable-length trailing buffer (EDK2 [1] idiom); its real size
+// is presumably derived from Header.DataSize -- confirm against producers.
+//
+typedef struct {
+  VTDLOG_EVENT_HEADER Header;
+  UINT64 Param;
+  UINT8 Data[1];
+} VTDLOG_EVENT_CONTEXT;
+
+//
+// Struct for the rest of the types: events carrying two scalar parameters.
+//
+typedef struct {
+  VTDLOG_EVENT_HEADER Header;
+  UINT64 Data1;
+  UINT64 Data2;
+}VTDLOG_EVENT_2PARAM;
+
+//
+// Struct for VTd log event
+//
+typedef union{
+  VTDLOG_EVENT_HEADER EventHeader;
+  VTDLOG_EVENT_2PARAM CommenEvent;
+  VTDLOG_EVENT_CONTEXT ContextEvent;
+} VTDLOG_EVENT;
+
+//
+// Information for PEI pre-memory phase
+//
+typedef struct {
+  UINT8 Mode;         // VTD_LOG_PEI_PRE_MEM_* mode code
+  UINT8 Status;       // Engine status for the chosen mode
+  UINT32 BarAddress;  // VTd engine BAR address
+} VTDLOG_PEI_PRE_MEM_INFO;
+
+//
+// Buffer struct for PEI phase
+//
+typedef struct {
+  UINT8 VtdLogPeiError;     // VTD_LOG_ERROR_* bits
+  VTDLOG_PEI_PRE_MEM_INFO PreMemInfo[VTD_LOG_PEI_PRE_MEM_BAR_MAX];
+  UINT32 PostMemBufferUsed; // Bytes already used in PostMemBuffer
+  UINT64 PostMemBuffer;     // Address of the post-memory log buffer
+} VTDLOG_PEI_BUFFER_HOB;
+
+#pragma pack()
+
+#endif // _VTD_LOG_DATA_HOB_H_
+
diff --git a/Silicon/Intel/IntelSiliconPkg/Include/Library/IntelVTdPeiDxeLib.h b/Silicon/Intel/IntelSiliconPkg/Include/Library/IntelVTdPeiDxeLib.h
new file mode 100644
index 000000000..e39619a71
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Include/Library/IntelVTdPeiDxeLib.h
@@ -0,0 +1,423 @@
+/** @file
+ Intel VTd library definitions.
+
+ Copyright (c) 2023 Intel Corporation. All rights reserved. <BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+**/
+#ifndef _INTEL_VTD_PEI_DXE_LIB_H_
+#define _INTEL_VTD_PEI_DXE_LIB_H_
+
+//
+// Include files
+//
+#include <Uefi/UefiBaseType.h>
+#include <Library/DebugLib.h>
+#include <Protocol/VtdLog.h>
+#include <Protocol/PlatformVtdPolicy.h>
+
+#if defined (EXT_CALLBACK)
+ #define _VTDLIB_DEBUG(PrintLevel, ...) \
+ do { \
+ VtdLogEventCallback (Context, CallbackHandle, PrintLevel, ##__VA_ARGS__); \
+ } while (FALSE)
+ #define VTDLIB_DEBUG(Expression) _VTDLIB_DEBUG Expression
+#else
+ #define VTDLIB_DEBUG(Expression) DEBUG(Expression)
+#endif
+
+#pragma pack(1)
+
+//
+// Information about one PCI device behind a VTd engine.
+//
+typedef struct {
+  UINT8 DeviceType;
+  VTD_SOURCE_ID PciSourceId;
+  EDKII_PLATFORM_VTD_PCI_DEVICE_ID PciDeviceId;
+  // for statistic analysis
+  UINT64 AccessCount;
+} PCI_DEVICE_DATA;
+
+//
+// PCI device list for one segment. PciDeviceData is a variable-length
+// trailing array (EDK2 [1] idiom): PciDeviceDataNumber entries are valid
+// out of PciDeviceDataMaxNumber allocated.
+//
+typedef struct {
+  BOOLEAN IncludeAllFlag;
+  UINT16 Segment;
+  UINT32 PciDeviceDataMaxNumber;
+  UINT32 PciDeviceDataNumber;
+  PCI_DEVICE_DATA PciDeviceData[1];
+} PCI_DEVICE_INFORMATION;
+
+//
+// 128-bit value, used for the 128-bit FRCD registers.
+//
+typedef struct {
+  UINT64 Uint64Lo;
+  UINT64 Uint64Hi;
+}VTD_UINT128;
+
+//
+// Full snapshot of one VTd engine's registers.
+// NOTE(review): the "REGESTER" spelling is part of the public typedef name;
+// renaming would break consumers. FrcdReg is a variable-length trailing
+// array of FrcdRegNum elements.
+//
+typedef struct {
+  UINT64 BaseAddress;
+  UINT32 VerReg;
+  UINT64 CapReg;
+  UINT64 EcapReg;
+  UINT32 GstsReg;
+  UINT64 RtaddrReg;
+  UINT64 CcmdReg;
+  UINT32 FstsReg;
+  UINT32 FectlReg;
+  UINT32 FedataReg;
+  UINT32 FeaddrReg;
+  UINT32 FeuaddrReg;
+  UINT64 IqercdReg;
+  UINT64 IvaReg;
+  UINT64 IotlbReg;
+  UINT16 FrcdRegNum; // Number of FRCD Registers
+  VTD_UINT128 FrcdReg[1];
+} VTD_REGESTER_INFO;
+
+//
+// Minimal register snapshot for queued-invalidation logging.
+//
+typedef struct {
+  UINT64 BaseAddress;
+  UINT32 FstsReg;
+  UINT64 IqercdReg;
+} VTD_REGESTER_QI_INFO;
+
+//
+// Reduced register snapshot; FrcdReg is a variable-length trailing array
+// of FrcdRegNum elements.
+//
+typedef struct {
+  UINT64 BaseAddress;
+  UINT32 GstsReg;
+  UINT64 RtaddrReg;
+  UINT32 FstsReg;
+  UINT32 FectlReg;
+  UINT64 IqercdReg;
+  UINT16 FrcdRegNum; // Number of FRCD Registers
+  VTD_UINT128 FrcdReg[1];
+} VTD_REGESTER_THIN_INFO;
+
+//
+// Record of one IOMMU SetAttribute operation and its result.
+//
+typedef struct {
+  VTD_SOURCE_ID SourceId;
+  EFI_PHYSICAL_ADDRESS DeviceAddress;
+  UINT64 Length;
+  UINT64 IoMmuAccess;
+  EFI_STATUS Status;
+} VTD_PROTOCOL_SET_ATTRIBUTE;
+
+//
+// Root table location of one VTd engine.
+//
+typedef struct {
+  UINT64 BaseAddress;
+  UINT64 TableAddress;
+  BOOLEAN Is5LevelPaging;
+} VTD_ROOT_TABLE_INFO;
+
+#pragma pack()
+
+/**
+ @brief This callback function is to handle the Vtd log strings.
+
+ [Consumption]
+ Dump VTd log
+
+ @param[in] Context Context
+ @param[in] ErrorLevel The error level of the debug message.
+ @param[in] Buffer Event string
+**/
+typedef
+VOID
+(EFIAPI *EDKII_VTD_LIB_STRING_CB) (
+ IN VOID *Context,
+ IN UINTN ErrorLevel,
+ IN CHAR8 *Buffer
+ );
+
+/**
+ @brief This function is to dump DMAR ACPI table.
+
+ [Consumption]
+ Dump VTd log
+
+ @param[in] Context Event Context
+ @param[in out] CallbackHandle Callback Handler
+ @param[in] Dmar DMAR ACPI table
+**/
+VOID
+VtdLibDumpAcpiDmar (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN EFI_ACPI_DMAR_HEADER *Dmar
+ );
+
+/**
+ @brief This function is to dump DRHD DMAR ACPI table.
+
+ [Consumption]
+ Dump VTd log
+
+ @param[in] Context Event Context
+ @param[in out] CallbackHandle Callback Handler
+ @param[in] Dmar DMAR ACPI table
+**/
+VOID
+VtdLibDumpAcpiDmarDrhd (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN EFI_ACPI_DMAR_HEADER *Dmar
+ );
+
+/**
+ @brief This function is to dump the PCI device information of the VTd engine.
+
+ [Consumption]
+ Dump VTd log
+
+ @param[in] Context Event Context
+ @param[in out] CallbackHandle Callback Handler
+ @param[in] PciDeviceInfo PCI device information
+**/
+VOID
+VtdLibDumpPciDeviceInfo (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN PCI_DEVICE_INFORMATION *PciDeviceInfo
+ );
+
+/**
+ @brief This function is to dump DMAR context entry table.
+
+ [Consumption]
+ Dump VTd log
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] RootEntry DMAR root entry.
+ @param[in] Is5LevelPaging If it is the 5 level paging.
+**/
+VOID
+VtdLibDumpDmarContextEntryTable (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTD_ROOT_ENTRY *RootEntry,
+ IN BOOLEAN Is5LevelPaging
+ );
+
+/**
+ @brief This function is to dump DMAR extended context entry table.
+
+ [Consumption]
+ Dump VTd log
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] ExtRootEntry DMAR extended root entry.
+ @param[in] Is5LevelPaging If it is the 5 level paging.
+**/
+VOID
+VtdLibDumpDmarExtContextEntryTable (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTD_EXT_ROOT_ENTRY *ExtRootEntry,
+ IN BOOLEAN Is5LevelPaging
+ );
+
+/**
+ @brief This function is to dump VTd registers.
+
+ [Consumption]
+ Dump VTd log
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] VtdRegInfo Registers Information
+**/
+VOID
+VtdLibDumpVtdRegsAll (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTD_REGESTER_INFO *VtdRegInfo
+ );
+
+/**
+ @brief This function is to dump VTd registers.
+
+ [Consumption]
+ Dump VTd log
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] VtdRegInfo Registers Information
+**/
+VOID
+VtdLibDumpVtdRegsThin (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTD_REGESTER_THIN_INFO *VtdRegInfo
+ );
+
+/**
+ @brief This function is to decode log event context.
+
+ [Consumption]
+ Dump VTd log
+
+ @param[in] Context Event context
+ @param[in out] CallbackHandle Callback handler
+ @param[in] Event Event struct
+
+ @retval TRUE Decode event success
+ @retval FALSE Unknown event
+**/
+BOOLEAN
+VtdLibDecodeEvent (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTDLOG_EVENT *Event
+ );
+
+/**
+ @brief Pre-boot DMA protection End Process
+
+ +----------------------+
+ | OnExitBootServices |
+ +----------------------+
+ ||
+ \/
++-------------------------------------------------+
+| Flush Write Buffer |
+| VtdLibFlushWriteBuffer () |
++-------------------------------------------------+
+ ||
+ \/
++-------------------------------------------------+
+| Invalidate Context Cache |
+| VtdLibSubmitQueuedInvalidationDescriptor () |
++-------------------------------------------------+
+ ||
+ \/
++-------------------------------------------------+
+| Invalidate IOTLB |
+| VtdLibSubmitQueuedInvalidationDescriptor () |
++-------------------------------------------------+
+ ||
+ \/
++-------------------------------------------------+
+| Disable DMAR |
+| VtdLibDisableDmar () |
++-------------------------------------------------+
+ ||
+ \/
++-------------------------------------------------+
+| Disable Queued Invalidation interface |
+| VtdLibDisableQueuedInvalidationInterface () |
++-------------------------------------------------+
+
+**/
+
+/**
+ @brief This function is to flush VTd engine write buffer.
+
+ [Consumption]
+ Operate VTd engine
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+**/
+VOID
+VtdLibFlushWriteBuffer (
+ IN UINTN VtdUnitBaseAddress
+ );
+
+/**
+ @brief This function is to clear Global Command Register Bits.
+
+ [Consumption]
+ Operate VTd engine
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+ @param[in] BitMask Bit mask.
+**/
+VOID
+VtdLibClearGlobalCommandRegisterBits (
+ IN UINTN VtdUnitBaseAddress,
+ IN UINT32 BitMask
+ );
+
+/**
+ @brief This function is to set VTd Global Command Register Bits.
+
+ [Consumption]
+ Operate VTd engine
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+ @param[in] BitMask Bit mask.
+**/
+VOID
+VtdLibSetGlobalCommandRegisterBits (
+ IN UINTN VtdUnitBaseAddress,
+ IN UINT32 BitMask
+ );
+
+/**
+ @brief This function is to disable DMAR.
+
+ [Consumption]
+ Operate VTd engine
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+
+ @retval EFI_SUCCESS DMAR translation is disabled.
+**/
+EFI_STATUS
+VtdLibDisableDmar (
+ IN UINTN VtdUnitBaseAddress
+ );
+
+/**
+ @brief This function is to disable PMR.
+
+ [Consumption]
+ Operate VTd engine
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+
+ @retval EFI_SUCCESS PMR is disabled.
+ @retval EFI_UNSUPPORTED PMR is not supported.
+ @retval EFI_NOT_STARTED PMR was not enabled.
+**/
+EFI_STATUS
+VtdLibDisablePmr (
+ IN UINTN VtdUnitBaseAddress
+ );
+
+/**
+ @brief This function is to disable queued invalidation interface
+
+ [Introduction]
+ Disable queued invalidation interface.
+
+ [Consumption]
+ Operate VTd engine
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+**/
+VOID
+VtdLibDisableQueuedInvalidationInterface (
+ IN UINTN VtdUnitBaseAddress
+ );
+
+/**
+ @brief This function is to submit a queued invalidation descriptor
+
+ [Introduction]
+ Submit the queued invalidation descriptor to the remapping
+ hardware unit and wait for its completion.
+
+ [Consumption]
+ Operate VTd engine
+
+ @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+ @param[in] Desc The invalidate descriptor
+ @param[in] ClearFaultBits TRUE - This API will clear the queued invalidation fault bits if any.
+ FALSE - The caller need to check and clear the queued invalidation fault bits.
+
+ @retval EFI_SUCCESS The operation was successful.
+ @retval RETURN_DEVICE_ERROR A fault is detected.
+ @retval EFI_INVALID_PARAMETER Parameter is invalid.
+ @retval EFI_DEVICE_ERROR Detect fault, need to clear fault bits if ClearFaultBits is FALSE
+**/
+EFI_STATUS
+VtdLibSubmitQueuedInvalidationDescriptor (
+ IN UINTN VtdUnitBaseAddress,
+ IN VOID *Desc,
+ IN BOOLEAN ClearFaultBits
+ );
+
+#endif
diff --git a/Silicon/Intel/IntelSiliconPkg/Include/Protocol/VtdLog.h b/Silicon/Intel/IntelSiliconPkg/Include/Protocol/VtdLog.h
new file mode 100644
index 000000000..7c2894e81
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Include/Protocol/VtdLog.h
@@ -0,0 +1,59 @@
+/** @file
+  The definition for VTD Log.
+
+  Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+  SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef __VTD_LOG_PROTOCOL_H__
+#define __VTD_LOG_PROTOCOL_H__
+
+#include <Guid/VtdLogDataHob.h>
+
+#define EDKII_VTD_LOG_PROTOCOL_GUID \
+  { \
+    0x1e271819, 0xa3ca, 0x481f, { 0xbd, 0xff, 0x92, 0x78, 0x2f, 0x9a, 0x99, 0x3c } \
+  }
+
+typedef struct _EDKII_VTD_LOG_PROTOCOL EDKII_VTD_LOG_PROTOCOL;
+
+#define EDKII_VTD_LOG_PROTOCOL_REVISION 0x00010000
+
+/**
+  Callback function, invoked once for each VTd log event.
+
+  @param[in] Context Event context
+  @param[in] Header Event header
+**/
+typedef
+VOID
+(EFIAPI *EDKII_VTD_LOG_HANDLE_EVENT) (
+  IN VOID *Context,
+  IN VTDLOG_EVENT_HEADER *Header
+  );
+
+/**
+  Get the VTd log events.
+  @param[in] Context Event context
+  @param[in out] CallbackHandle Callback function for each VTd log event
+
+  @retval UINT64 Number of events
+**/
+typedef
+UINT64
+(EFIAPI *EDKII_VTD_LOG_GET_EVENTS) (
+  IN VOID *Context,
+  IN OUT EDKII_VTD_LOG_HANDLE_EVENT CallbackHandle
+  );
+
+struct _EDKII_VTD_LOG_PROTOCOL {
+  UINT64 Revision;
+  EDKII_VTD_LOG_GET_EVENTS GetEvents;
+};
+
+extern EFI_GUID gEdkiiVTdLogProtocolGuid;
+
+#endif
+
diff --git a/Silicon/Intel/IntelSiliconPkg/IntelSiliconPkg.dec b/Silicon/Intel/IntelSiliconPkg/IntelSiliconPkg.dec
index cad22acda..ec8690a8d 100644
--- a/Silicon/Intel/IntelSiliconPkg/IntelSiliconPkg.dec
+++ b/Silicon/Intel/IntelSiliconPkg/IntelSiliconPkg.dec
@@ -73,6 +73,9 @@
## HOB GUID to get memory information after MRC is done. The hob data will be used to set the PMR ranges
gVtdPmrInfoDataHobGuid = {0x6fb61645, 0xf168, 0x46be, { 0x80, 0xec, 0xb5, 0x02, 0x38, 0x5e, 0xe7, 0xe7 } }
+ ## HOB GUID to get VTd log data.
+ gVTdLogBufferHobGuid = {0xc8049121, 0xdf91, 0x4dfd, { 0xad, 0xcb, 0x1c, 0x55, 0x85, 0x09, 0x6d, 0x3b } }
+
## Include/Guid/MicrocodeShadowInfoHob.h
gEdkiiMicrocodeShadowInfoHobGuid = { 0x658903f9, 0xda66, 0x460d, { 0x8b, 0xb0, 0x9d, 0x2d, 0xdf, 0x65, 0x44, 0x59 } }
@@ -119,6 +122,8 @@
gPchSmmSpi2ProtocolGuid = { 0x2d1c0c43, 0x20d3, 0x40ae, { 0x99, 0x07, 0x2d, 0xf0, 0xe7, 0x91, 0x21, 0xa5 } }
gEdkiiPlatformVTdPolicyProtocolGuid = { 0x3d17e448, 0x466, 0x4e20, { 0x99, 0x9f, 0xb2, 0xe1, 0x34, 0x88, 0xee, 0x22 }}
+ gEdkiiVTdLogProtocolGuid = { 0x1e271819, 0xa3ca, 0x481f, { 0xbd, 0xff, 0x92, 0x78, 0x2f, 0x9a, 0x99, 0x3c }}
+
gIntelDieInfoProtocolGuid = { 0xAED8A0A1, 0xFDE6, 0x4CF2, { 0xA3, 0x85, 0x08, 0xF1, 0x25, 0xF2, 0x40, 0x37 }}
## Protocol for device security policy.
@@ -207,3 +212,19 @@
# non-zero: The size of an additional NVS region following the Regular variable region.<BR>
# @Prompt Additional NVS Region Size.
gIntelSiliconPkgTokenSpaceGuid.PcdFlashNvStorageAdditionalSize|0x00000000|UINT32|0x0000000F
+
+ ## Declares VTd LOG Output Level.<BR><BR>
+ # 0 : Disable VTd Log
+ # 1 : Enable Basic Log
+ # 2 : Enable All Log
+ # @Prompt The VTd Log Output Level.
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdLogLevel|0x02|UINT8|0x00000017
+
+ ## Declares VTd PEI POST-MEM LOG buffer size.<BR><BR>
+  # @Prompt The VTd PEI Post-Mem Log buffer size (8 KB by default).
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdPeiPostMemLogBufferSize|0x00002000|UINT32|0x00000019
+
+ ## Declares VTd DXE LOG buffer size.<BR><BR>
+  # @Prompt The VTd DXE Log buffer size (4 MB by default).
+ gIntelSiliconPkgTokenSpaceGuid.PcdVTdDxeLogBufferSize|0x00400000|UINT32|0x0000001A
+
diff --git a/Silicon/Intel/IntelSiliconPkg/IntelSiliconPkg.dsc b/Silicon/Intel/IntelSiliconPkg/IntelSiliconPkg.dsc
index 170eb480a..c8ff40b38 100644
--- a/Silicon/Intel/IntelSiliconPkg/IntelSiliconPkg.dsc
+++ b/Silicon/Intel/IntelSiliconPkg/IntelSiliconPkg.dsc
@@ -45,6 +45,7 @@
UefiBootServicesTableLib|MdePkg/Library/UefiBootServicesTableLib/UefiBootServicesTableLib.inf
UefiDriverEntryPoint|MdePkg/Library/UefiDriverEntryPoint/UefiDriverEntryPoint.inf
VariableFlashInfoLib|MdeModulePkg/Library/BaseVariableFlashInfoLib/BaseVariableFlashInfoLib.inf
+ IntelVTdPeiDxeLib|IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLib.inf
[LibraryClasses.common.PEIM]
PeimEntryPoint|MdePkg/Library/PeimEntryPoint/PeimEntryPoint.inf
diff --git a/Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLib.c b/Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLib.c
new file mode 100644
index 000000000..1e65115cb
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLib.c
@@ -0,0 +1,1812 @@
+/** @file
+ Source code file for Intel VTd PEI DXE library.
+
+Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/PrintLib.h>
+#include <Library/IoLib.h>
+#include <Library/CacheMaintenanceLib.h>
+#include <Library/IntelVTdPeiDxeLib.h>
+#include <IndustryStandard/Vtd.h>
+
+//
+// Define the maximum message length that this library supports
+//
+#define MAX_STRING_LENGTH (0x100)
+
+#define VTD_64BITS_ADDRESS(Lo, Hi) (LShiftU64 (Lo, 12) | LShiftU64 (Hi, 32))
+
+/**
+  Format a VTd log message and hand the resulting string to a callback.
+
+  Produces a Null-terminated ASCII string from FormatString plus the variable
+  argument list, then delivers it to CallbackHandle together with Context and
+  ErrorLevel. Nothing is produced when CallbackHandle or FormatString is NULL.
+
+  @param[in] Context Event context
+  @param[in out] CallbackHandle Callback handler
+  @param[in] ErrorLevel The error level of the debug message.
+  @param[in] FormatString A Null-terminated ASCII format string.
+  @param[in] ... Variable argument list whose contents are accessed based on the format string specified by FormatString.
+
+  @return The number of ASCII characters in the produced output buffer not including the
+          Null-terminator.
+**/
+UINTN
+EFIAPI
+VtdLogEventCallback (
+  IN VOID *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+  IN UINTN ErrorLevel,
+  IN CONST CHAR8 *FormatString,
+  ...
+  )
+{
+  CHAR8    LineBuffer[MAX_STRING_LENGTH];
+  VA_LIST  ArgList;
+  UINTN    CharCount;
+
+  //
+  // Guard clauses: nothing to format, or nobody to deliver it to.
+  //
+  if (FormatString == NULL) {
+    return 0;
+  }
+  if (CallbackHandle == NULL) {
+    return 0;
+  }
+
+  VA_START (ArgList, FormatString);
+  CharCount = AsciiVSPrint (LineBuffer, sizeof (LineBuffer), FormatString, ArgList);
+  VA_END (ArgList);
+
+  //
+  // Only deliver non-empty strings.
+  //
+  if (CharCount != 0) {
+    CallbackHandle (Context, ErrorLevel, LineBuffer);
+  }
+
+  return CharCount;
+}
+
+/**
+  Dump DMAR DeviceScopeEntry.
+
+  Prints the device scope entry header, a type-specific description, and
+  every EFI_ACPI_DMAR_PCI_PATH node that follows the header.
+
+  @param[in] Context Event context
+  @param[in out] CallbackHandle Callback handler
+  @param[in] DmarDeviceScopeEntry DMAR DeviceScopeEntry
+**/
+VOID
+VtdLibDumpDmarDeviceScopeEntry (
+  IN VOID *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+  IN EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *DmarDeviceScopeEntry
+  )
+{
+  UINTN PciPathNumber;
+  UINTN PciPathIndex;
+  EFI_ACPI_DMAR_PCI_PATH *PciPath;
+
+  if (DmarDeviceScopeEntry == NULL) {
+    return;
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " *************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " * DMA-Remapping Device Scope Entry Structure *\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " *************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    (sizeof(UINTN) == sizeof(UINT64)) ?
+    " DMAR Device Scope Entry address ...................... 0x%016lx\n" :
+    " DMAR Device Scope Entry address ...................... 0x%08x\n",
+    DmarDeviceScopeEntry
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " Device Scope Entry Type ............................ 0x%02x\n",
+    DmarDeviceScopeEntry->Type
+    ));
+  switch (DmarDeviceScopeEntry->Type) {
+  case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_ENDPOINT:
+    VTDLIB_DEBUG ((DEBUG_INFO,
+      " PCI Endpoint Device\n"
+      ));
+    break;
+  case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_PCI_BRIDGE:
+    //
+    // Typo fix: "Sub-hierachy" -> "Sub-hierarchy" (matches VT-d spec wording).
+    //
+    VTDLIB_DEBUG ((DEBUG_INFO,
+      " PCI Sub-hierarchy\n"
+      ));
+    break;
+  case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_IOAPIC:
+    VTDLIB_DEBUG ((DEBUG_INFO,
+      " IOAPIC\n"
+      ));
+    break;
+  case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_MSI_CAPABLE_HPET:
+    VTDLIB_DEBUG ((DEBUG_INFO,
+      " MSI Capable HPET\n"
+      ));
+    break;
+  case EFI_ACPI_DEVICE_SCOPE_ENTRY_TYPE_ACPI_NAMESPACE_DEVICE:
+    VTDLIB_DEBUG ((DEBUG_INFO,
+      " ACPI Namespace Device\n"
+      ));
+    break;
+  default:
+    //
+    // Unknown type: the raw value was already printed above.
+    //
+    break;
+  }
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " Length ............................................. 0x%02x\n",
+    DmarDeviceScopeEntry->Length
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " Enumeration ID ..................................... 0x%02x\n",
+    DmarDeviceScopeEntry->EnumerationId
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " Starting Bus Number ................................ 0x%02x\n",
+    DmarDeviceScopeEntry->StartBusNumber
+    ));
+
+  //
+  // The PCI path array immediately follows the fixed-size header.
+  //
+  PciPathNumber = (DmarDeviceScopeEntry->Length - sizeof(EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER)) / sizeof(EFI_ACPI_DMAR_PCI_PATH);
+  PciPath = (EFI_ACPI_DMAR_PCI_PATH *)(DmarDeviceScopeEntry + 1);
+  for (PciPathIndex = 0; PciPathIndex < PciPathNumber; PciPathIndex++) {
+    VTDLIB_DEBUG ((DEBUG_INFO,
+      " Device ............................................. 0x%02x\n",
+      PciPath[PciPathIndex].Device
+      ));
+    VTDLIB_DEBUG ((DEBUG_INFO,
+      " Function ........................................... 0x%02x\n",
+      PciPath[PciPathIndex].Function
+      ));
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " *************************************************************************\n\n"
+    ));
+}
+
+/**
+  Dump DMAR SIDP table.
+
+  Prints the SoC Integrated Device Property reporting structure and every
+  device scope entry it contains.
+
+  @param[in] Context Event context
+  @param[in out] CallbackHandle Callback handler
+  @param[in] Sidp DMAR SIDP table
+**/
+VOID
+VtdLibDumpDmarSidp (
+  IN VOID *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+  IN EFI_ACPI_DMAR_SIDP_HEADER *Sidp
+  )
+{
+  EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *DmarDeviceScopeEntry;
+  INTN SidpLen;
+
+  if (Sidp == NULL) {
+    return;
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " ***************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " * SoC Integrated Device Property Reporting Structure *\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " ***************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    (sizeof(UINTN) == sizeof(UINT64)) ?
+    " SIDP address ........................................... 0x%016lx\n" :
+    " SIDP address ........................................... 0x%08x\n",
+    Sidp
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " Type ................................................. 0x%04x\n",
+    Sidp->Header.Type
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " Length ............................................... 0x%04x\n",
+    Sidp->Header.Length
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " Segment Number ....................................... 0x%04x\n",
+    Sidp->SegmentNumber
+    ));
+
+  //
+  // Walk the trailing device scope entries.
+  //
+  SidpLen = Sidp->Header.Length - sizeof(EFI_ACPI_DMAR_SIDP_HEADER);
+  DmarDeviceScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)(Sidp + 1);
+  while (SidpLen > 0) {
+    //
+    // A zero-length device scope entry (malformed table) would keep this
+    // loop from ever terminating; stop dumping instead.
+    //
+    if (DmarDeviceScopeEntry->Length == 0) {
+      break;
+    }
+    VtdLibDumpDmarDeviceScopeEntry (Context, CallbackHandle, DmarDeviceScopeEntry);
+    SidpLen -= DmarDeviceScopeEntry->Length;
+    DmarDeviceScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)((UINTN)DmarDeviceScopeEntry + DmarDeviceScopeEntry->Length);
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " ***************************************************************************\n\n"
+    ));
+}
+
+/**
+  Dump DMAR SATC table.
+
+  Prints the SoC Integrated Address Translation Cache reporting structure
+  and every device scope entry it contains.
+
+  @param[in] Context Event context
+  @param[in out] CallbackHandle Callback handler
+  @param[in] Satc DMAR SATC table
+**/
+VOID
+VtdLibDumpDmarSatc (
+  IN VOID *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+  IN EFI_ACPI_DMAR_SATC_HEADER *Satc
+  )
+{
+  EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *DmarDeviceScopeEntry;
+  INTN SatcLen;
+
+  if (Satc == NULL) {
+    return;
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " ***************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " * ACPI Soc Integrated Address Translation Cache reporting Structure *\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " ***************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    (sizeof(UINTN) == sizeof(UINT64)) ?
+    " SATC address ........................................... 0x%016lx\n" :
+    " SATC address ........................................... 0x%08x\n",
+    Satc
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " Type ................................................. 0x%04x\n",
+    Satc->Header.Type
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " Length ............................................... 0x%04x\n",
+    Satc->Header.Length
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " Flags ................................................ 0x%02x\n",
+    Satc->Flags
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " Segment Number ....................................... 0x%04x\n",
+    Satc->SegmentNumber
+    ));
+
+  //
+  // Walk the trailing device scope entries.
+  //
+  SatcLen = Satc->Header.Length - sizeof(EFI_ACPI_DMAR_SATC_HEADER);
+  DmarDeviceScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)(Satc + 1);
+  while (SatcLen > 0) {
+    //
+    // A zero-length device scope entry (malformed table) would keep this
+    // loop from ever terminating; stop dumping instead.
+    //
+    if (DmarDeviceScopeEntry->Length == 0) {
+      break;
+    }
+    VtdLibDumpDmarDeviceScopeEntry (Context, CallbackHandle, DmarDeviceScopeEntry);
+    SatcLen -= DmarDeviceScopeEntry->Length;
+    DmarDeviceScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)((UINTN)DmarDeviceScopeEntry + DmarDeviceScopeEntry->Length);
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " ***************************************************************************\n\n"
+    ));
+}
+
+/**
+  Dump DMAR ANDD table.
+
+  Prints the ACPI Name-space Device Declaration structure; the NUL-terminated
+  ACPI object name immediately follows the fixed-size header.
+
+  @param[in] Context Event context
+  @param[in out] CallbackHandle Callback handler
+  @param[in] Andd DMAR ANDD table
+**/
+VOID
+VtdLibDumpDmarAndd (
+  IN VOID *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+  IN EFI_ACPI_DMAR_ANDD_HEADER *Andd
+  )
+{
+  if (Andd == NULL) {
+    return;
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " ***************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " * ACPI Name-space Device Declaration Structure *\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " ***************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    (sizeof(UINTN) == sizeof(UINT64)) ?
+    " ANDD address ........................................... 0x%016lx\n" :
+    " ANDD address ........................................... 0x%08x\n",
+    Andd
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " Type ................................................. 0x%04x\n",
+    Andd->Header.Type
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " Length ............................................... 0x%04x\n",
+    Andd->Header.Length
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " ACPI Device Number ................................... 0x%02x\n",
+    Andd->AcpiDeviceNumber
+    ));
+  // The ACPI object name string starts right after the header (%a = ASCII).
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " ACPI Object Name ..................................... '%a'\n",
+    (Andd + 1)
+    ));
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " ***************************************************************************\n\n"
+    ));
+}
+
+/**
+  Dump DMAR RHSA table.
+
+  Prints the Remapping Hardware Status Affinity structure: the register base
+  address of a remapping unit and its NUMA proximity domain.
+
+  @param[in] Context Event context
+  @param[in out] CallbackHandle Callback handler
+  @param[in] Rhsa DMAR RHSA table
+**/
+VOID
+VtdLibDumpDmarRhsa (
+  IN VOID *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+  IN EFI_ACPI_DMAR_RHSA_HEADER *Rhsa
+  )
+{
+  if (Rhsa == NULL) {
+    return;
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " ***************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " * Remapping Hardware Status Affinity Structure *\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " ***************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    (sizeof(UINTN) == sizeof(UINT64)) ?
+    " RHSA address ........................................... 0x%016lx\n" :
+    " RHSA address ........................................... 0x%08x\n",
+    Rhsa
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " Type ................................................. 0x%04x\n",
+    Rhsa->Header.Type
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " Length ............................................... 0x%04x\n",
+    Rhsa->Header.Length
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " Register Base Address ................................ 0x%016lx\n",
+    Rhsa->RegisterBaseAddress
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " Proximity Domain ..................................... 0x%08x\n",
+    Rhsa->ProximityDomain
+    ));
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " ***************************************************************************\n\n"
+    ));
+}
+
+/**
+  Dump DMAR ATSR table.
+
+  Prints the Root Port ATS Capability Reporting structure and every device
+  scope entry it contains.
+
+  @param[in] Context Event context
+  @param[in out] CallbackHandle Callback handler
+  @param[in] Atsr DMAR ATSR table
+**/
+VOID
+VtdLibDumpDmarAtsr (
+  IN VOID *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+  IN EFI_ACPI_DMAR_ATSR_HEADER *Atsr
+  )
+{
+  EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *DmarDeviceScopeEntry;
+  INTN AtsrLen;
+
+  if (Atsr == NULL) {
+    return;
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " ***************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " * Root Port ATS Capability Reporting Structure *\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " ***************************************************************************\n"
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    (sizeof(UINTN) == sizeof(UINT64)) ?
+    " ATSR address ........................................... 0x%016lx\n" :
+    " ATSR address ........................................... 0x%08x\n",
+    Atsr
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " Type ................................................. 0x%04x\n",
+    Atsr->Header.Type
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " Length ............................................... 0x%04x\n",
+    Atsr->Header.Length
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " Flags ................................................ 0x%02x\n",
+    Atsr->Flags
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " ALL_PORTS .......................................... 0x%02x\n",
+    Atsr->Flags & EFI_ACPI_DMAR_ATSR_FLAGS_ALL_PORTS
+    ));
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " Segment Number ....................................... 0x%04x\n",
+    Atsr->SegmentNumber
+    ));
+
+  //
+  // Walk the trailing device scope entries.
+  //
+  AtsrLen = Atsr->Header.Length - sizeof(EFI_ACPI_DMAR_ATSR_HEADER);
+  DmarDeviceScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)(Atsr + 1);
+  while (AtsrLen > 0) {
+    //
+    // A zero-length device scope entry (malformed table) would keep this
+    // loop from ever terminating; stop dumping instead.
+    //
+    if (DmarDeviceScopeEntry->Length == 0) {
+      break;
+    }
+    VtdLibDumpDmarDeviceScopeEntry (Context, CallbackHandle, DmarDeviceScopeEntry);
+    AtsrLen -= DmarDeviceScopeEntry->Length;
+    DmarDeviceScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)((UINTN)DmarDeviceScopeEntry + DmarDeviceScopeEntry->Length);
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    " ***************************************************************************\n\n"
+    ));
+}
+
+/**
+  Dump DMAR RMRR (Reserved Memory Region Reporting) table.
+
+  Prints the RMRR header fields and then dumps each Device Scope entry
+  that follows the fixed header.
+
+  @param[in]      Context         Event context
+  @param[in, out] CallbackHandle  Callback handler
+  @param[in]      Rmrr            DMAR RMRR table
+**/
+VOID
+VtdLibDumpDmarRmrr (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN EFI_ACPI_DMAR_RMRR_HEADER *Rmrr
+ )
+{
+ EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *DmarDeviceScopeEntry;
+ INTN RmrrLen;
+
+ // Nothing to dump when no table was provided.
+ if (Rmrr == NULL) {
+ return;
+ }
+
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " ***************************************************************************\n"
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " * Reserved Memory Region Reporting Structure *\n"
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " ***************************************************************************\n"
+ ));
+ // Pick the pointer width format at runtime so the address prints naturally
+ // on both 32-bit and 64-bit builds.
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ (sizeof(UINTN) == sizeof(UINT64)) ?
+ " RMRR address ........................................... 0x%016lx\n" :
+ " RMRR address ........................................... 0x%08x\n",
+ Rmrr
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Type ................................................. 0x%04x\n",
+ Rmrr->Header.Type
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Length ............................................... 0x%04x\n",
+ Rmrr->Header.Length
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Segment Number ....................................... 0x%04x\n",
+ Rmrr->SegmentNumber
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Reserved Memory Region Base Address .................. 0x%016lx\n",
+ Rmrr->ReservedMemoryRegionBaseAddress
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Reserved Memory Region Limit Address ................. 0x%016lx\n",
+ Rmrr->ReservedMemoryRegionLimitAddress
+ ));
+
+ //
+ // Walk the variable-length Device Scope entries that follow the fixed
+ // RMRR header. NOTE(review): a zero-length scope entry would make this
+ // loop spin forever - assumes a well-formed DMAR table.
+ //
+ RmrrLen = Rmrr->Header.Length - sizeof(EFI_ACPI_DMAR_RMRR_HEADER);
+ DmarDeviceScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)(Rmrr + 1);
+ while (RmrrLen > 0) {
+ VtdLibDumpDmarDeviceScopeEntry (Context, CallbackHandle, DmarDeviceScopeEntry);
+ RmrrLen -= DmarDeviceScopeEntry->Length;
+ DmarDeviceScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)((UINTN)DmarDeviceScopeEntry + DmarDeviceScopeEntry->Length);
+ }
+
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " ***************************************************************************\n\n"
+ ));
+}
+
+/**
+  Dump DMAR DRHD (DMA-Remapping Hardware Definition) table.
+
+  Prints the DRHD header fields and then dumps each Device Scope entry
+  that follows the fixed header.
+
+  @param[in]      Context         Event context
+  @param[in, out] CallbackHandle  Callback handler
+  @param[in]      Drhd            DMAR DRHD table
+**/
+VOID
+VtdLibDumpDmarDrhd (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN EFI_ACPI_DMAR_DRHD_HEADER *Drhd
+ )
+{
+ EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *DmarDeviceScopeEntry;
+ INTN DrhdLen;
+
+ // Nothing to dump when no table was provided.
+ if (Drhd == NULL) {
+ return;
+ }
+
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " ***************************************************************************\n"
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " * DMA-Remapping Hardware Definition Structure *\n"
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " ***************************************************************************\n"
+ ));
+ // Pick the pointer width format at runtime so the address prints naturally
+ // on both 32-bit and 64-bit builds.
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ (sizeof(UINTN) == sizeof(UINT64)) ?
+ " DRHD address ........................................... 0x%016lx\n" :
+ " DRHD address ........................................... 0x%08x\n",
+ Drhd
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Type ................................................. 0x%04x\n",
+ Drhd->Header.Type
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Length ............................................... 0x%04x\n",
+ Drhd->Header.Length
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Flags ................................................ 0x%02x\n",
+ Drhd->Flags
+ ));
+ // Prints the masked bit value (not normalized to 0/1).
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " INCLUDE_PCI_ALL .................................... 0x%02x\n",
+ Drhd->Flags & EFI_ACPI_DMAR_DRHD_FLAGS_INCLUDE_PCI_ALL
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Size ................................................. 0x%02x\n",
+ Drhd->Size
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Segment Number ....................................... 0x%04x\n",
+ Drhd->SegmentNumber
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Register Base Address ................................ 0x%016lx\n",
+ Drhd->RegisterBaseAddress
+ ));
+
+ //
+ // Walk the variable-length Device Scope entries that follow the fixed
+ // DRHD header. NOTE(review): a zero-length scope entry would make this
+ // loop spin forever - assumes a well-formed DMAR table.
+ //
+ DrhdLen = Drhd->Header.Length - sizeof(EFI_ACPI_DMAR_DRHD_HEADER);
+ DmarDeviceScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)(Drhd + 1);
+ while (DrhdLen > 0) {
+ VtdLibDumpDmarDeviceScopeEntry (Context, CallbackHandle, DmarDeviceScopeEntry);
+ DrhdLen -= DmarDeviceScopeEntry->Length;
+ DmarDeviceScopeEntry = (EFI_ACPI_DMAR_DEVICE_SCOPE_STRUCTURE_HEADER *)((UINTN)DmarDeviceScopeEntry + DmarDeviceScopeEntry->Length);
+ }
+
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " ***************************************************************************\n\n"
+ ));
+}
+
+/**
+  Dump the fixed header of a DMAR ACPI table (address, host address
+  width, and the global flags). Sub-structures are not dumped here.
+
+  @param[in]      Context         Event context
+  @param[in, out] CallbackHandle  Callback handler
+  @param[in]      Dmar            DMAR ACPI table
+**/
+VOID
+VtdLibDumpAcpiDmarHeader (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN EFI_ACPI_DMAR_HEADER *Dmar
+ )
+{
+ //
+ // Dump Dmar table
+ //
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ "*****************************************************************************\n"
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ "* DMAR Table *\n"
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ "*****************************************************************************\n"
+ ));
+
+ // Pick the pointer width format at runtime so the address prints naturally
+ // on both 32-bit and 64-bit builds.
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ (sizeof(UINTN) == sizeof(UINT64)) ?
+ "DMAR address ............................................. 0x%016lx\n" :
+ "DMAR address ............................................. 0x%08x\n",
+ Dmar
+ ));
+
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Table Contents:\n"
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Host Address Width ................................... 0x%02x\n",
+ Dmar->HostAddressWidth
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " Flags ................................................ 0x%02x\n",
+ Dmar->Flags
+ ));
+ // The following lines print the masked bit values (not normalized to 0/1).
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " INTR_REMAP ......................................... 0x%02x\n",
+ Dmar->Flags & EFI_ACPI_DMAR_FLAGS_INTR_REMAP
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " X2APIC_OPT_OUT_SET ................................. 0x%02x\n",
+ Dmar->Flags & EFI_ACPI_DMAR_FLAGS_X2APIC_OPT_OUT
+ ));
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ " DMA_CTRL_PLATFORM_OPT_IN_FLAG ...................... 0x%02x\n",
+ Dmar->Flags & EFI_ACPI_DMAR_FLAGS_DMA_CTRL_PLATFORM_OPT_IN_FLAG
+ ));
+}
+
+/**
+  Dump a complete DMAR ACPI table: the fixed header followed by every
+  remapping structure (DRHD/RMRR/ATSR/RHSA/ANDD/SATC/SIDP) it contains.
+
+  @param[in]      Context         Event context
+  @param[in, out] CallbackHandle  Callback handler
+  @param[in]      Dmar            DMAR ACPI table
+**/
+VOID
+VtdLibDumpAcpiDmar (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN EFI_ACPI_DMAR_HEADER *Dmar
+ )
+{
+ EFI_ACPI_DMAR_STRUCTURE_HEADER *DmarHeader;
+ INTN DmarLen;
+
+ // Nothing to dump when no table was provided.
+ if (Dmar == NULL) {
+ return;
+ }
+
+ //
+ // Dump Dmar table
+ //
+ VtdLibDumpAcpiDmarHeader (Context, CallbackHandle, Dmar);
+
+ //
+ // Walk the remapping structures that follow the fixed DMAR header and
+ // dispatch each one to the type-specific dump helper. Unrecognized types
+ // are silently skipped. NOTE(review): a zero-length structure would make
+ // this loop spin forever - assumes a well-formed DMAR table.
+ //
+ DmarLen = Dmar->Header.Length - sizeof(EFI_ACPI_DMAR_HEADER);
+ DmarHeader = (EFI_ACPI_DMAR_STRUCTURE_HEADER *)(Dmar + 1);
+ while (DmarLen > 0) {
+ switch (DmarHeader->Type) {
+ case EFI_ACPI_DMAR_TYPE_DRHD:
+ VtdLibDumpDmarDrhd (Context, CallbackHandle, (EFI_ACPI_DMAR_DRHD_HEADER *)DmarHeader);
+ break;
+ case EFI_ACPI_DMAR_TYPE_RMRR:
+ VtdLibDumpDmarRmrr (Context, CallbackHandle, (EFI_ACPI_DMAR_RMRR_HEADER *)DmarHeader);
+ break;
+ case EFI_ACPI_DMAR_TYPE_ATSR:
+ VtdLibDumpDmarAtsr (Context, CallbackHandle, (EFI_ACPI_DMAR_ATSR_HEADER *)DmarHeader);
+ break;
+ case EFI_ACPI_DMAR_TYPE_RHSA:
+ VtdLibDumpDmarRhsa (Context, CallbackHandle, (EFI_ACPI_DMAR_RHSA_HEADER *)DmarHeader);
+ break;
+ case EFI_ACPI_DMAR_TYPE_ANDD:
+ VtdLibDumpDmarAndd (Context, CallbackHandle, (EFI_ACPI_DMAR_ANDD_HEADER *)DmarHeader);
+ break;
+ case EFI_ACPI_DMAR_TYPE_SATC:
+ VtdLibDumpDmarSatc (Context, CallbackHandle, (EFI_ACPI_DMAR_SATC_HEADER *)DmarHeader);
+ break;
+ case EFI_ACPI_DMAR_TYPE_SIDP:
+ VtdLibDumpDmarSidp (Context, CallbackHandle, (EFI_ACPI_DMAR_SIDP_HEADER *)DmarHeader);
+ break;
+ default:
+ break;
+ }
+ DmarLen -= DmarHeader->Length;
+ DmarHeader = (EFI_ACPI_DMAR_STRUCTURE_HEADER *)((UINTN)DmarHeader + DmarHeader->Length);
+ }
+
+ VTDLIB_DEBUG ((DEBUG_INFO,
+ "*****************************************************************************\n\n"
+ ));
+}
+
+/**
+  Dump only the DRHD structures contained in a DMAR ACPI table.
+
+  The common DMAR header is printed first; the remapping structure list
+  is then walked and every DRHD entry is dumped while all other
+  structure types are skipped.
+
+  @param[in]      Context         Event context
+  @param[in, out] CallbackHandle  Callback handler
+  @param[in]      Dmar            DMAR ACPI table
+**/
+VOID
+VtdLibDumpAcpiDmarDrhd (
+  IN VOID                           *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB    CallbackHandle,
+  IN EFI_ACPI_DMAR_HEADER           *Dmar
+  )
+{
+  EFI_ACPI_DMAR_STRUCTURE_HEADER    *Structure;
+  INTN                              Remaining;
+
+  if (Dmar == NULL) {
+    return;
+  }
+
+  //
+  // Common DMAR header first, then the DRHD sub-structures.
+  //
+  VtdLibDumpAcpiDmarHeader (Context, CallbackHandle, Dmar);
+
+  Remaining = Dmar->Header.Length - sizeof (EFI_ACPI_DMAR_HEADER);
+  Structure = (EFI_ACPI_DMAR_STRUCTURE_HEADER *) (Dmar + 1);
+  for (; Remaining > 0; Remaining -= Structure->Length,
+       Structure = (EFI_ACPI_DMAR_STRUCTURE_HEADER *) ((UINTN) Structure + Structure->Length)) {
+    if (Structure->Type == EFI_ACPI_DMAR_TYPE_DRHD) {
+      VtdLibDumpDmarDrhd (Context, CallbackHandle, (EFI_ACPI_DMAR_DRHD_HEADER *) Structure);
+    }
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO,
+    "*****************************************************************************\n\n"
+    ));
+}
+
+/**
+  Dump the PCI device information managed by this VTd engine.
+
+  Prints the device count and IncludeAll flag, then one line per tracked
+  PCI source-id (segment/bus/device/function).
+
+  @param[in]      Context         Event context
+  @param[in, out] CallbackHandle  Callback handler
+  @param[in]      PciDeviceInfo   VTd Unit Information
+**/
+VOID
+VtdLibDumpPciDeviceInfo (
+  IN VOID                           *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB    CallbackHandle,
+  IN PCI_DEVICE_INFORMATION         *PciDeviceInfo
+  )
+{
+  UINTN    DeviceIndex;
+
+  if (PciDeviceInfo == NULL) {
+    return;
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO, "PCI Device Information (Number 0x%x, IncludeAll - %d):\n",
+    PciDeviceInfo->PciDeviceDataNumber,
+    PciDeviceInfo->IncludeAllFlag
+    ));
+
+  for (DeviceIndex = 0; DeviceIndex < PciDeviceInfo->PciDeviceDataNumber; DeviceIndex++) {
+    VTDLIB_DEBUG ((DEBUG_INFO, " S%04x B%02x D%02x F%02x\n",
+      PciDeviceInfo->Segment,
+      PciDeviceInfo->PciDeviceData[DeviceIndex].PciSourceId.Bits.Bus,
+      PciDeviceInfo->PciDeviceData[DeviceIndex].PciSourceId.Bits.Device,
+      PciDeviceInfo->PciDeviceData[DeviceIndex].PciSourceId.Bits.Function
+      ));
+  }
+}
+
+/**
+  Dump DMAR second level paging entry.
+
+  Walks the second-level page-table hierarchy (5 or 4 levels) and prints
+  every non-zero entry at DEBUG_VERBOSE level. Only non-zero entries are
+  descended into; a level-2 entry with PageSize set is a large page and
+  is not descended further.
+
+  @param[in]      Context                 Event context
+  @param[in, out] CallbackHandle          Callback handler
+  @param[in]      SecondLevelPagingEntry  The second level paging entry.
+  @param[in]      Is5LevelPaging          If it is the 5 level paging.
+**/
+VOID
+VtdLibDumpSecondLevelPagingEntry (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VOID *SecondLevelPagingEntry,
+ IN BOOLEAN Is5LevelPaging
+ )
+{
+ UINTN Index5;
+ UINTN Index4;
+ UINTN Index3;
+ UINTN Index2;
+ UINTN Index1;
+ UINTN Lvl5IndexEnd;
+ VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl5PtEntry;
+ VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl4PtEntry;
+ VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl3PtEntry;
+ VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl2PtEntry;
+ VTD_SECOND_LEVEL_PAGING_ENTRY *Lvl1PtEntry;
+
+ VTDLIB_DEBUG ((DEBUG_VERBOSE, "================\n"));
+ VTDLIB_DEBUG ((DEBUG_VERBOSE, "DMAR Second Level Page Table:\n"));
+ VTDLIB_DEBUG ((DEBUG_VERBOSE, "SecondLevelPagingEntry Base - 0x%x, Is5LevelPaging - %d\n", SecondLevelPagingEntry, Is5LevelPaging));
+
+ // With 4-level paging the outer loop runs exactly once and the base
+ // pointer is treated directly as the level-4 table; with 5-level paging
+ // all 512 level-5 entries are scanned.
+ Lvl5IndexEnd = Is5LevelPaging ? SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY) : 1;
+ Lvl4PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)SecondLevelPagingEntry;
+ Lvl5PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)SecondLevelPagingEntry;
+
+ for (Index5 = 0; Index5 < Lvl5IndexEnd; Index5++) {
+ if (Is5LevelPaging) {
+ // Print non-zero level-5 entries, then skip zero (not-present) ones.
+ if (Lvl5PtEntry[Index5].Uint64 != 0) {
+ VTDLIB_DEBUG ((DEBUG_VERBOSE, " Lvl5Pt Entry(0x%03x) - 0x%016lx\n", Index5, Lvl5PtEntry[Index5].Uint64));
+ }
+ if (Lvl5PtEntry[Index5].Uint64 == 0) {
+ continue;
+ }
+ Lvl4PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(Lvl5PtEntry[Index5].Bits.AddressLo, Lvl5PtEntry[Index5].Bits.AddressHi);
+ }
+
+ for (Index4 = 0; Index4 < SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY); Index4++) {
+ if (Lvl4PtEntry[Index4].Uint64 != 0) {
+ VTDLIB_DEBUG ((DEBUG_VERBOSE, " Lvl4Pt Entry(0x%03x) - 0x%016lx\n", Index4, Lvl4PtEntry[Index4].Uint64));
+ }
+ if (Lvl4PtEntry[Index4].Uint64 == 0) {
+ continue;
+ }
+ Lvl3PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(Lvl4PtEntry[Index4].Bits.AddressLo, Lvl4PtEntry[Index4].Bits.AddressHi);
+ for (Index3 = 0; Index3 < SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY); Index3++) {
+ if (Lvl3PtEntry[Index3].Uint64 != 0) {
+ VTDLIB_DEBUG ((DEBUG_VERBOSE, " Lvl3Pt Entry(0x%03x) - 0x%016lx\n", Index3, Lvl3PtEntry[Index3].Uint64));
+ }
+ if (Lvl3PtEntry[Index3].Uint64 == 0) {
+ continue;
+ }
+
+ Lvl2PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(Lvl3PtEntry[Index3].Bits.AddressLo, Lvl3PtEntry[Index3].Bits.AddressHi);
+ for (Index2 = 0; Index2 < SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY); Index2++) {
+ if (Lvl2PtEntry[Index2].Uint64 != 0) {
+ VTDLIB_DEBUG ((DEBUG_VERBOSE, " Lvl2Pt Entry(0x%03x) - 0x%016lx\n", Index2, Lvl2PtEntry[Index2].Uint64));
+ }
+ if (Lvl2PtEntry[Index2].Uint64 == 0) {
+ continue;
+ }
+ // PageSize == 0 means the level-2 entry points at a level-1 table;
+ // otherwise it maps a large page and there is nothing to descend into.
+ if (Lvl2PtEntry[Index2].Bits.PageSize == 0) {
+ Lvl1PtEntry = (VTD_SECOND_LEVEL_PAGING_ENTRY *)(UINTN)VTD_64BITS_ADDRESS(Lvl2PtEntry[Index2].Bits.AddressLo, Lvl2PtEntry[Index2].Bits.AddressHi);
+ for (Index1 = 0; Index1 < SIZE_4KB/sizeof(VTD_SECOND_LEVEL_PAGING_ENTRY); Index1++) {
+ if (Lvl1PtEntry[Index1].Uint64 != 0) {
+ VTDLIB_DEBUG ((DEBUG_VERBOSE, " Lvl1Pt Entry(0x%03x) - 0x%016lx\n", Index1, Lvl1PtEntry[Index1].Uint64));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ VTDLIB_DEBUG ((DEBUG_VERBOSE, "================\n"));
+}
+
+/**
+  Dump DMAR context entry table.
+
+  Walks all root entries (one per PCI bus), and for each present root
+  entry walks its context entries (one per device/function), dumping the
+  second-level page table of every present context entry.
+
+  @param[in]      Context         Event context
+  @param[in, out] CallbackHandle  Callback handler
+  @param[in]      RootEntry       DMAR root entry.
+  @param[in]      Is5LevelPaging  If it is the 5 level paging.
+**/
+VOID
+VtdLibDumpDmarContextEntryTable (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTD_ROOT_ENTRY *RootEntry,
+ IN BOOLEAN Is5LevelPaging
+ )
+{
+ UINTN Index;
+ UINTN Index2;
+ VTD_CONTEXT_ENTRY *ContextEntry;
+
+ VTDLIB_DEBUG ((DEBUG_INFO, "=========================\n"));
+ VTDLIB_DEBUG ((DEBUG_INFO, "DMAR Context Entry Table:\n"));
+
+ VTDLIB_DEBUG ((DEBUG_INFO, "RootEntry Address - 0x%x\n", RootEntry));
+
+ for (Index = 0; Index < VTD_ROOT_ENTRY_NUMBER; Index++) {
+ // The root entry index is the PCI bus number (printed as B%02x).
+ if ((RootEntry[Index].Uint128.Uint64Lo != 0) || (RootEntry[Index].Uint128.Uint64Hi != 0)) {
+ VTDLIB_DEBUG ((DEBUG_INFO, " RootEntry(0x%02x) B%02x - 0x%016lx %016lx\n",
+ Index, Index, RootEntry[Index].Uint128.Uint64Hi, RootEntry[Index].Uint128.Uint64Lo));
+ }
+ if (RootEntry[Index].Bits.Present == 0) {
+ continue;
+ }
+ ContextEntry = (VTD_CONTEXT_ENTRY *) (UINTN) VTD_64BITS_ADDRESS (RootEntry[Index].Bits.ContextTablePointerLo, RootEntry[Index].Bits.ContextTablePointerHi);
+ for (Index2 = 0; Index2 < VTD_CONTEXT_ENTRY_NUMBER; Index2++) {
+ // The context entry index encodes device (bits 7:3) and function (bits 2:0).
+ if ((ContextEntry[Index2].Uint128.Uint64Lo != 0) || (ContextEntry[Index2].Uint128.Uint64Hi != 0)) {
+ VTDLIB_DEBUG ((DEBUG_INFO, " ContextEntry(0x%02x) D%02xF%02x - 0x%016lx %016lx\n",
+ Index2, Index2 >> 3, Index2 & 0x7, ContextEntry[Index2].Uint128.Uint64Hi, ContextEntry[Index2].Uint128.Uint64Lo));
+ }
+ if (ContextEntry[Index2].Bits.Present == 0) {
+ continue;
+ }
+ VtdLibDumpSecondLevelPagingEntry (Context, CallbackHandle, (VOID *) (UINTN) VTD_64BITS_ADDRESS (ContextEntry[Index2].Bits.SecondLevelPageTranslationPointerLo, ContextEntry[Index2].Bits.SecondLevelPageTranslationPointerHi), Is5LevelPaging);
+ }
+ }
+ VTDLIB_DEBUG ((DEBUG_INFO, "=========================\n"));
+}
+
+/**
+  Dump DMAR extended context entry table.
+
+  Walks all extended root entries; for each one the lower half (devices
+  0-15, context indices 0-127) and the upper half (context indices
+  128-255) of the context table are dumped separately.
+
+  @param[in]      Context         Event context
+  @param[in, out] CallbackHandle  Callback handler
+  @param[in]      ExtRootEntry    DMAR extended root entry.
+  @param[in]      Is5LevelPaging  If it is the 5 level paging.
+**/
+VOID
+VtdLibDumpDmarExtContextEntryTable (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTD_EXT_ROOT_ENTRY *ExtRootEntry,
+ IN BOOLEAN Is5LevelPaging
+ )
+{
+ UINTN Index;
+ UINTN Index2;
+ VTD_EXT_CONTEXT_ENTRY *ExtContextEntry;
+
+ VTDLIB_DEBUG ((DEBUG_INFO, "=========================\n"));
+ VTDLIB_DEBUG ((DEBUG_INFO, "DMAR ExtContext Entry Table:\n"));
+
+ VTDLIB_DEBUG ((DEBUG_INFO, "ExtRootEntry Address - 0x%x\n", ExtRootEntry));
+
+ for (Index = 0; Index < VTD_ROOT_ENTRY_NUMBER; Index++) {
+ if ((ExtRootEntry[Index].Uint128.Uint64Lo != 0) || (ExtRootEntry[Index].Uint128.Uint64Hi != 0)) {
+ VTDLIB_DEBUG ((DEBUG_INFO, " ExtRootEntry(0x%02x) B%02x - 0x%016lx %016lx\n",
+ Index, Index, ExtRootEntry[Index].Uint128.Uint64Hi, ExtRootEntry[Index].Uint128.Uint64Lo));
+ }
+ if (ExtRootEntry[Index].Bits.LowerPresent == 0) {
+ continue;
+ }
+ // Lower half: context indices 0..127.
+ ExtContextEntry = (VTD_EXT_CONTEXT_ENTRY *) (UINTN) VTD_64BITS_ADDRESS (ExtRootEntry[Index].Bits.LowerContextTablePointerLo, ExtRootEntry[Index].Bits.LowerContextTablePointerHi);
+ for (Index2 = 0; Index2 < VTD_CONTEXT_ENTRY_NUMBER/2; Index2++) {
+ if ((ExtContextEntry[Index2].Uint256.Uint64_1 != 0) || (ExtContextEntry[Index2].Uint256.Uint64_2 != 0) ||
+ (ExtContextEntry[Index2].Uint256.Uint64_3 != 0) || (ExtContextEntry[Index2].Uint256.Uint64_4 != 0)) {
+ VTDLIB_DEBUG ((DEBUG_INFO, " ExtContextEntryLower(0x%02x) D%02xF%02x - 0x%016lx %016lx %016lx %016lx\n",
+ Index2, Index2 >> 3, Index2 & 0x7, ExtContextEntry[Index2].Uint256.Uint64_4, ExtContextEntry[Index2].Uint256.Uint64_3, ExtContextEntry[Index2].Uint256.Uint64_2, ExtContextEntry[Index2].Uint256.Uint64_1));
+ }
+ if (ExtContextEntry[Index2].Bits.Present == 0) {
+ continue;
+ }
+ VtdLibDumpSecondLevelPagingEntry (Context, CallbackHandle, (VOID *) (UINTN) VTD_64BITS_ADDRESS (ExtContextEntry[Index2].Bits.SecondLevelPageTranslationPointerLo, ExtContextEntry[Index2].Bits.SecondLevelPageTranslationPointerHi), Is5LevelPaging);
+ }
+
+ if (ExtRootEntry[Index].Bits.UpperPresent == 0) {
+ continue;
+ }
+ // Upper half: context indices 128..255 (hence the +128 in the D/F decode).
+ ExtContextEntry = (VTD_EXT_CONTEXT_ENTRY *) (UINTN) VTD_64BITS_ADDRESS (ExtRootEntry[Index].Bits.UpperContextTablePointerLo, ExtRootEntry[Index].Bits.UpperContextTablePointerHi);
+ for (Index2 = 0; Index2 < VTD_CONTEXT_ENTRY_NUMBER/2; Index2++) {
+ if ((ExtContextEntry[Index2].Uint256.Uint64_1 != 0) || (ExtContextEntry[Index2].Uint256.Uint64_2 != 0) ||
+ (ExtContextEntry[Index2].Uint256.Uint64_3 != 0) || (ExtContextEntry[Index2].Uint256.Uint64_4 != 0)) {
+ VTDLIB_DEBUG ((DEBUG_INFO, " ExtContextEntryUpper(0x%02x) D%02xF%02x - 0x%016lx %016lx %016lx %016lx\n",
+ Index2, (Index2 + 128) >> 3, (Index2 + 128) & 0x7, ExtContextEntry[Index2].Uint256.Uint64_4, ExtContextEntry[Index2].Uint256.Uint64_3, ExtContextEntry[Index2].Uint256.Uint64_2, ExtContextEntry[Index2].Uint256.Uint64_1));
+ }
+ // NOTE(review): unlike the lower half, present upper-half entries do NOT
+ // have their second-level page tables dumped, and this trailing continue
+ // has no effect (it is the last statement in the loop) - confirm whether
+ // a VtdLibDumpSecondLevelPagingEntry call was intended here.
+ if (ExtContextEntry[Index2].Bits.Present == 0) {
+ continue;
+ }
+ }
+ }
+ VTDLIB_DEBUG ((DEBUG_INFO, "=========================\n"));
+}
+
+/**
+  Dump VTd FRCD (Fault Recording) registers.
+
+  Reconstructs each 128-bit fault-recording register from the saved
+  lo/hi pair and decodes its fields whenever the register is non-zero.
+
+  @param[in]      Context         Event context
+  @param[in, out] CallbackHandle  Callback handler
+  @param[in]      FrcdRegNum      FRCD Register Number
+  @param[in]      FrcdRegTab      FRCD Register Table
+**/
+VOID
+VtdLibDumpVtdFrcdRegs (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN UINT16 FrcdRegNum,
+ IN VTD_UINT128 *FrcdRegTab
+ )
+{
+ UINT16 Index;
+ VTD_FRCD_REG FrcdReg;
+ VTD_SOURCE_ID SourceId;
+
+ for (Index = 0; Index < FrcdRegNum; Index++) {
+ // Rebuild the 128-bit register so the VTD_FRCD_REG bitfields line up.
+ FrcdReg.Uint64[0] = FrcdRegTab[Index].Uint64Lo;
+ FrcdReg.Uint64[1] = FrcdRegTab[Index].Uint64Hi;
+ VTDLIB_DEBUG ((DEBUG_INFO, " FRCD_REG[%d] - 0x%016lx %016lx\n", Index, FrcdReg.Uint64[1], FrcdReg.Uint64[0]));
+ // Only decode the fields when the register actually recorded a fault.
+ if (FrcdReg.Uint64[1] != 0 || FrcdReg.Uint64[0] != 0) {
+ VTDLIB_DEBUG ((DEBUG_INFO, " Fault Info - 0x%016lx\n", VTD_64BITS_ADDRESS(FrcdReg.Bits.FILo, FrcdReg.Bits.FIHi)));
+ VTDLIB_DEBUG ((DEBUG_INFO, " Fault Bit - %d\n", FrcdReg.Bits.F));
+ SourceId.Uint16 = (UINT16)FrcdReg.Bits.SID;
+ VTDLIB_DEBUG ((DEBUG_INFO, " Source - B%02x D%02x F%02x\n", SourceId.Bits.Bus, SourceId.Bits.Device, SourceId.Bits.Function));
+ // T1/T2 are combined into a 2-bit fault type code.
+ VTDLIB_DEBUG ((DEBUG_INFO, " Type - 0x%02x\n", (FrcdReg.Bits.T1 << 1) | FrcdReg.Bits.T2));
+ VTDLIB_DEBUG ((DEBUG_INFO, " Reason - %x (Refer to VTd Spec, Appendix A)\n", FrcdReg.Bits.FR));
+ }
+ }
+}
+
+/**
+  Dump the full set of VTd engine registers captured in a
+  VTD_REGESTER_INFO record, including the fault recording registers.
+
+  @param[in]      Context         Event context
+  @param[in, out] CallbackHandle  Callback handler
+  @param[in]      VtdRegInfo      Registers information
+**/
+VOID
+VtdLibDumpVtdRegsAll (
+  IN VOID                           *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB    CallbackHandle,
+  IN VTD_REGESTER_INFO              *VtdRegInfo
+  )
+{
+  if (VtdRegInfo == NULL) {
+    return;
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO, "VTd Engine: [0x%016lx]\n", VtdRegInfo->BaseAddress));
+  VTDLIB_DEBUG ((DEBUG_INFO, " VER_REG - 0x%08x\n", VtdRegInfo->VerReg));
+  VTDLIB_DEBUG ((DEBUG_INFO, " CAP_REG - 0x%016lx\n", VtdRegInfo->CapReg));
+  VTDLIB_DEBUG ((DEBUG_INFO, " ECAP_REG - 0x%016lx\n", VtdRegInfo->EcapReg));
+  VTDLIB_DEBUG ((DEBUG_INFO, " GSTS_REG - 0x%08x \n", VtdRegInfo->GstsReg));
+  VTDLIB_DEBUG ((DEBUG_INFO, " RTADDR_REG - 0x%016lx\n", VtdRegInfo->RtaddrReg));
+  VTDLIB_DEBUG ((DEBUG_INFO, " CCMD_REG - 0x%016lx\n", VtdRegInfo->CcmdReg));
+  VTDLIB_DEBUG ((DEBUG_INFO, " FSTS_REG - 0x%08x\n", VtdRegInfo->FstsReg));
+  VTDLIB_DEBUG ((DEBUG_INFO, " FECTL_REG - 0x%08x\n", VtdRegInfo->FectlReg));
+  VTDLIB_DEBUG ((DEBUG_INFO, " FEDATA_REG - 0x%08x\n", VtdRegInfo->FedataReg));
+  VTDLIB_DEBUG ((DEBUG_INFO, " FEADDR_REG - 0x%08x\n", VtdRegInfo->FeaddrReg));
+  VTDLIB_DEBUG ((DEBUG_INFO, " FEUADDR_REG - 0x%08x\n", VtdRegInfo->FeuaddrReg));
+  VTDLIB_DEBUG ((DEBUG_INFO, " IQERCD_REG - 0x%016lx\n", VtdRegInfo->IqercdReg));
+
+  //
+  // Fault recording registers are decoded by the dedicated helper.
+  //
+  VtdLibDumpVtdFrcdRegs (Context, CallbackHandle, VtdRegInfo->FrcdRegNum, VtdRegInfo->FrcdReg);
+
+  VTDLIB_DEBUG ((DEBUG_INFO, " IVA_REG - 0x%016lx\n", VtdRegInfo->IvaReg));
+  VTDLIB_DEBUG ((DEBUG_INFO, " IOTLB_REG - 0x%016lx\n", VtdRegInfo->IotlbReg));
+}
+
+/**
+  Dump the reduced ("thin") set of VTd engine registers captured in a
+  VTD_REGESTER_THIN_INFO record, including the fault recording registers.
+
+  @param[in]      Context         Event context
+  @param[in, out] CallbackHandle  Callback handler
+  @param[in]      VtdRegInfo      Registers information
+**/
+VOID
+VtdLibDumpVtdRegsThin (
+  IN VOID                           *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB    CallbackHandle,
+  IN VTD_REGESTER_THIN_INFO         *VtdRegInfo
+  )
+{
+  if (VtdRegInfo == NULL) {
+    return;
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO, "VTd Engine: [0x%016lx]\n", VtdRegInfo->BaseAddress));
+  VTDLIB_DEBUG ((DEBUG_INFO, " GSTS_REG - 0x%08x \n", VtdRegInfo->GstsReg));
+  VTDLIB_DEBUG ((DEBUG_INFO, " RTADDR_REG - 0x%016lx\n", VtdRegInfo->RtaddrReg));
+  VTDLIB_DEBUG ((DEBUG_INFO, " FSTS_REG - 0x%08x\n", VtdRegInfo->FstsReg));
+  VTDLIB_DEBUG ((DEBUG_INFO, " FECTL_REG - 0x%08x\n", VtdRegInfo->FectlReg));
+  VTDLIB_DEBUG ((DEBUG_INFO, " IQERCD_REG - 0x%016lx\n", VtdRegInfo->IqercdReg));
+
+  //
+  // Fault recording registers are decoded by the dedicated helper.
+  //
+  VtdLibDumpVtdFrcdRegs (Context, CallbackHandle, VtdRegInfo->FrcdRegNum, VtdRegInfo->FrcdReg);
+}
+
+/**
+  Dump the queued-invalidation related VTd registers captured in a
+  VTD_REGESTER_QI_INFO record.
+
+  @param[in]      Context         Event context
+  @param[in, out] CallbackHandle  Callback handler
+  @param[in]      VtdRegInfo      Registers information
+**/
+VOID
+VtdLibDumpVtdRegsQi (
+  IN VOID                           *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB    CallbackHandle,
+  IN VTD_REGESTER_QI_INFO           *VtdRegInfo
+  )
+{
+  if (VtdRegInfo == NULL) {
+    return;
+  }
+
+  VTDLIB_DEBUG ((DEBUG_INFO, "VTd Engine: [0x%016lx]\n", VtdRegInfo->BaseAddress));
+  VTDLIB_DEBUG ((DEBUG_INFO, " FSTS_REG - 0x%08x\n", VtdRegInfo->FstsReg));
+  VTDLIB_DEBUG ((DEBUG_INFO, " IQERCD_REG - 0x%016lx\n", VtdRegInfo->IqercdReg));
+}
+
+/**
+  Dump a VTd PEI pre-memory event.
+
+  Data1 carries the VTd BAR address; Data2 packs the protection mode in
+  bits 7:0 and the status code in bits 15:8.
+
+  @param[in]      Context         Event context
+  @param[in, out] CallbackHandle  Callback handler
+  @param[in]      Event           VTDLOG_EVENT_2PARAM event
+
+**/
+VOID
+VtdLibDumpPeiPreMemInfo (
+  IN VOID                           *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB    CallbackHandle,
+  IN VTDLOG_EVENT_2PARAM            *Event
+  )
+{
+  UINT64    VtdBarAddress;
+  UINT64    Mode;
+  UINT64    Status;
+
+  VtdBarAddress = Event->Data1;
+  Mode          = Event->Data2 & 0xFF;
+  Status        = (Event->Data2 >> 8) & 0xFF;
+
+  if (Mode == VTD_LOG_PEI_PRE_MEM_DISABLE) {
+    VTDLIB_DEBUG ((DEBUG_INFO, "PEI (pre-memory): Disabled [0x%016lx] 0x%x\n", VtdBarAddress, Status));
+  } else if (Mode == VTD_LOG_PEI_PRE_MEM_ADM) {
+    VTDLIB_DEBUG ((DEBUG_INFO, "PEI (pre-memory): Enable Abort DMA Mode [0x%016lx] 0x%x\n", VtdBarAddress, Status));
+  } else if (Mode == VTD_LOG_PEI_PRE_MEM_TE) {
+    VTDLIB_DEBUG ((DEBUG_INFO, "PEI (pre-memory): Enable NULL Root Entry Table [0x%016lx] 0x%x\n", VtdBarAddress, Status));
+  } else if (Mode == VTD_LOG_PEI_PRE_MEM_PMR) {
+    VTDLIB_DEBUG ((DEBUG_INFO, "PEI (pre-memory): Enable PMR [0x%016lx] 0x%x\n", VtdBarAddress, Status));
+  } else if (Mode == VTD_LOG_PEI_PRE_MEM_NOT_USED) {
+    //
+    // Not used - intentionally silent.
+    //
+  } else {
+    VTDLIB_DEBUG ((DEBUG_INFO, "PEI (pre-memory): Unknown [0x%016lx] 0x%x\n", VtdBarAddress, Status));
+  }
+}
+
+/**
+  Dump a VTd Queued Invalidation event.
+
+  Data1 carries the QI action/error code; Data2 carries the VTd engine
+  base address.
+
+  @param[in]      Context         Event context
+  @param[in, out] CallbackHandle  Callback handler
+  @param[in]      Event           VTDLOG_EVENT_2PARAM event
+
+**/
+VOID
+VtdLibDumpQueuedInvaildation (
+  IN VOID                           *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB    CallbackHandle,
+  IN VTDLOG_EVENT_2PARAM            *Event
+  )
+{
+  if (Event->Data1 == VTD_LOG_QI_DISABLE) {
+    VTDLIB_DEBUG ((DEBUG_INFO, " [0x%016lx] Disable\n", Event->Data2));
+  } else if (Event->Data1 == VTD_LOG_QI_ENABLE) {
+    VTDLIB_DEBUG ((DEBUG_INFO, " [0x%016lx] Enable\n", Event->Data2));
+  } else if (Event->Data1 == VTD_LOG_QI_ERROR_OUT_OF_RESOURCES) {
+    VTDLIB_DEBUG ((DEBUG_INFO, " [0x%016lx] error - Out of resources\n", Event->Data2));
+  } else {
+    VTDLIB_DEBUG ((DEBUG_INFO, " [0x%016lx] error - (0x%x)\n", Event->Data2, Event->Data1));
+  }
+}
+
+/**
+  Dump a VTd registers event, dispatching on the snapshot format
+  (full, thin, or queued-invalidation) recorded in Event->Param.
+
+  @param[in]      Context         Event context
+  @param[in, out] CallbackHandle  Callback handler
+  @param[in]      Event           VTDLOG_EVENT_CONTEXT event
+
+**/
+VOID
+VtdLibDumpRegisters (
+  IN VOID                           *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB    CallbackHandle,
+  IN VTDLOG_EVENT_CONTEXT           *Event
+  )
+{
+  if (Event->Param == VTDLOG_REGISTER_ALL) {
+    VtdLibDumpVtdRegsAll (Context, CallbackHandle, (VTD_REGESTER_INFO *) Event->Data);
+  } else if (Event->Param == VTDLOG_REGISTER_THIN) {
+    VtdLibDumpVtdRegsThin (Context, CallbackHandle, (VTD_REGESTER_THIN_INFO *) Event->Data);
+  } else if (Event->Param == VTDLOG_REGISTER_QI) {
+    VtdLibDumpVtdRegsQi (Context, CallbackHandle, (VTD_REGESTER_QI_INFO *) Event->Data);
+  } else {
+    VTDLIB_DEBUG ((DEBUG_INFO, " Unknown format (%d)\n", Event->Param));
+  }
+}
+
+/**
+  Dump a VTd PEI error event.
+
+  Data1 carries the error code; Data2 carries the associated length.
+
+  @param[in]      Context         Event context
+  @param[in, out] CallbackHandle  Callback handler
+  @param[in]      Event           VTDLOG_EVENT_2PARAM event
+
+**/
+VOID
+VtdLibDumpPeiError (
+  IN VOID                           *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB    CallbackHandle,
+  IN VTDLOG_EVENT_2PARAM            *Event
+  )
+{
+  UINT64    Timestamp;
+
+  Timestamp = Event->Header.Timestamp;
+
+  if (Event->Data1 == VTD_LOG_PEI_VTD_ERROR_PPI_ALLOC) {
+    VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: Error - PPI alloc length [0x%016lx]\n", Timestamp, Event->Data2));
+  } else if (Event->Data1 == VTD_LOG_PEI_VTD_ERROR_PPI_MAP) {
+    VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: Error - PPI map length [0x%016lx]\n", Timestamp, Event->Data2));
+  } else {
+    VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: Error - Unknown (%d) 0x%x\n", Timestamp, Event->Data1, Event->Data2));
+  }
+}
+
+/**
+  Dump a VTd SetAttribute event logged by the IOMMU protocol.
+
+  @param[in]      Context         Event context
+  @param[in, out] CallbackHandle  Callback handler
+  @param[in]      Event           VTDLOG_EVENT_CONTEXT event
+
+**/
+VOID
+VtdLibDumpSetAttribute (
+  IN VOID                           *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB    CallbackHandle,
+  IN VTDLOG_EVENT_CONTEXT           *Event
+  )
+{
+  VTD_PROTOCOL_SET_ATTRIBUTE    *SetAttributeInfo;
+
+  SetAttributeInfo = (VTD_PROTOCOL_SET_ATTRIBUTE *) Event->Data;
+
+  //
+  // The format string consumes six values after DEBUG_INFO; the original
+  // code passed only five (IoMmuAccess was missing), which shifted Status
+  // into the IoMmuAccess column and left %r reading an uninitialized
+  // varargs slot.
+  //
+  VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: SetAttribute SourceId = 0x%04x, Address = 0x%lx, Length = 0x%lx, IoMmuAccess = 0x%lx, %r\n",
+    Event->Header.Timestamp,
+    SetAttributeInfo->SourceId.Uint16,
+    SetAttributeInfo->DeviceAddress,
+    SetAttributeInfo->Length,
+    SetAttributeInfo->IoMmuAccess,
+    SetAttributeInfo->Status));
+}
+
+
+
+/**
+  Dump a VTd Root Table event.
+
+  Param selects the table flavor: 0 for a classic root entry table,
+  1 for an extended root entry table.
+
+  @param[in]      Context         Event context
+  @param[in, out] CallbackHandle  Callback handler
+  @param[in]      Event           VTDLOG_EVENT_CONTEXT event
+
+**/
+VOID
+VtdLibDumpRootTable (
+  IN VOID                           *Context,
+  IN OUT EDKII_VTD_LIB_STRING_CB    CallbackHandle,
+  IN VTDLOG_EVENT_CONTEXT           *Event
+  )
+{
+  VTD_ROOT_TABLE_INFO    *RootTableInfo;
+
+  RootTableInfo = (VTD_ROOT_TABLE_INFO *) Event->Data;
+
+  switch (Event->Param) {
+  case 0:
+    VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Root Entry Table [0x%016lx]\n", Event->Header.Timestamp, RootTableInfo->BaseAddress));
+    VtdLibDumpDmarContextEntryTable (Context, CallbackHandle, (VTD_ROOT_ENTRY *) (UINTN) RootTableInfo->TableAddress, RootTableInfo->Is5LevelPaging);
+    break;
+  case 1:
+    VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Ext Root Entry Table [0x%016lx]\n", Event->Header.Timestamp, RootTableInfo->BaseAddress));
+    VtdLibDumpDmarExtContextEntryTable (Context, CallbackHandle, (VTD_EXT_ROOT_ENTRY *) (UINTN) RootTableInfo->TableAddress, RootTableInfo->Is5LevelPaging);
+    break;
+  default:
+    VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Unknown Root Table Type (%d)\n", Event->Header.Timestamp, Event->Param));
+    break;
+  }
+}
+
+/**
+ Decode log event.
+
+ @param[in] Context Event context
+ @param[in out] PciDeviceId Callback handler
+ @param[in] Event Event struct
+
+ @retval TRUE Decode event success
+ @retval FALSE Unknown event
+**/
+BOOLEAN
+VtdLibDecodeEvent (
+ IN VOID *Context,
+ IN OUT EDKII_VTD_LIB_STRING_CB CallbackHandle,
+ IN VTDLOG_EVENT *Event
+ )
+{
+ BOOLEAN Result;
+ UINT64 Timestamp;
+ UINT64 Data1;
+ UINT64 Data2;
+
+ Result = TRUE;
+ Timestamp = Event->EventHeader.Timestamp;
+ Data1 = Event->CommenEvent.Data1;
+ Data2 = Event->CommenEvent.Data2;
+
+ switch (Event->EventHeader.LogType) {
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_BASIC):
+ if (Data1 & VTD_LOG_ERROR_BUFFER_FULL) {
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: Info : Log Buffer Full\n", Timestamp));
+ Data1 &= ~VTD_LOG_ERROR_BUFFER_FULL;
+ }
+ if (Data1 != 0) {
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: Info : 0x%x, 0x%x\n", Timestamp, Data1, Data2));
+ }
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_PRE_MEM_DMA_PROTECT):
+ VtdLibDumpPeiPreMemInfo (Context, CallbackHandle, &(Event->CommenEvent));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_PMR_LOW_MEMORY_RANGE):
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: PMR Low Memory Range [0x%x, 0x%x]\n", Timestamp, Data1, Data2));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_PMR_HIGH_MEMORY_RANGE):
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: PMR High Memory Range [0x%016lx, 0x%016lx]\n", Timestamp, Data1, Data2));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_PROTECT_MEMORY_RANGE):
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: Protected DMA Memory Range [0x%016lx, 0x%016lx]\n", Timestamp, Data1, Data2));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_POST_MEM_ENABLE_DMA_PROTECT):
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: Enable DMA protection [0x%016lx] %r\n", Timestamp, Data1, Data2));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_POST_MEM_DISABLE_DMA_PROTECT):
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: Disable DMA protection [0x%016lx]\n", Timestamp, Data1));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_QUEUED_INVALIDATION):
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: Queued Invalidation", Timestamp));
+ VtdLibDumpQueuedInvaildation (Context, CallbackHandle, &(Event->CommenEvent));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_REGISTER):
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: Dump Registers\n", Timestamp));
+ VtdLibDumpRegisters (Context, CallbackHandle, &(Event->ContextEvent));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_VTD_ERROR):
+ VtdLibDumpPeiError (Context, CallbackHandle, &(Event->CommenEvent));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_PPI_ALLOC_BUFFER):
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: PPI AllocateBuffer 0x%x, Length = 0x%x\n", Timestamp, Data1, Data2));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_PEI_PPI_MAP):
+ VTDLIB_DEBUG ((DEBUG_INFO, "PEI [%ld]: PPI Map 0x%x, Length = 0x%x\n", Timestamp, Data1, Data2));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_BASIC):
+ if (Data1 & VTD_LOG_ERROR_BUFFER_FULL) {
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Info : Log Buffer Full\n", Timestamp));
+ Data1 &= ~VTD_LOG_ERROR_BUFFER_FULL;
+ }
+ if (Data1 != 0) {
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Info : 0x%x, 0x%x\n", Timestamp, Data1, Data2));
+ }
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_DMAR_TABLE):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: DMAR Table\n", Timestamp));
+ VtdLibDumpAcpiDmar (Context, CallbackHandle, (EFI_ACPI_DMAR_HEADER *) Event->ContextEvent.Data);
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_SETUP_VTD):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Setup VTd Below/Above 4G Memory Limit = [0x%016lx, 0x%016lx]\n", Timestamp, Data1, Data2));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_PCI_DEVICE):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: PCI Devices [0x%016lx]\n", Timestamp, Event->ContextEvent.Param));
+ VtdLibDumpPciDeviceInfo (Context, CallbackHandle, (PCI_DEVICE_INFORMATION *) Event->ContextEvent.Data);
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_REGISTER):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Dump Registers\n", Timestamp));
+ VtdLibDumpRegisters (Context, CallbackHandle, &(Event->ContextEvent));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_ENABLE_DMAR):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Enable DMAR [0x%016lx]\n", Timestamp, Data1));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_DISABLE_DMAR):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Disable DMAR [0x%016lx]\n", Timestamp, Data1));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_DISABLE_PMR):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Disable PMR [0x%016lx] %r\n", Timestamp, Data1, Data2));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_INSTALL_IOMMU_PROTOCOL):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Install IOMMU Protocol %r\n", Timestamp, Data1));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_QUEUED_INVALIDATION):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Queued Invalidation", Timestamp));
+ VtdLibDumpQueuedInvaildation (Context, CallbackHandle, &(Event->CommenEvent));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_ROOT_TABLE):
+ VtdLibDumpRootTable (Context, CallbackHandle, &(Event->ContextEvent));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_IOMMU_ALLOC_BUFFER):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: AllocateBuffer 0x%x, Page = 0x%x\n", Timestamp, Data2, Data1));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_IOMMU_FREE_BUFFER):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: FreeBuffer 0x%x, Page = 0x%x\n", Timestamp, Data2, Data1));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_IOMMU_MAP):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Map 0x%x, Operation = 0x%x\n", Timestamp, Data1, Data2));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_IOMMU_UNMAP):
+ VTDLIB_DEBUG ((DEBUG_INFO, "DXE [%ld]: Unmap 0x%x, NumberOfBytes = 0x%x\n", Timestamp, Data2, Data1));
+ break;
+ case VTDLOG_LOG_TYPE (VTDLOG_DXE_IOMMU_SET_ATTRIBUTE):
+ VtdLibDumpSetAttribute (Context, CallbackHandle, &(Event->ContextEvent));
+ break;
+ default:
+ VTDLIB_DEBUG ((DEBUG_INFO, "## Unknown VTd Event Type=%d Timestamp=%ld Size=%d\n", Event->EventHeader.LogType, Event->EventHeader.Timestamp, Event->EventHeader.DataSize));
+ Result = FALSE;
+ break;
+ }
+
+ return Result;
+}
+
+/**
+  Flush the write buffer of a VTd engine, if the hardware implements one.
+
+  When CAP_REG.RWBF is set, the engine buffers internal writes; issuing the
+  WBF command through GCMD_REG forces them out.  The call blocks until
+  GSTS_REG reports the flush has completed.  Engines without a write buffer
+  (RWBF == 0) need no flush and the function returns immediately.
+
+  @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+**/
+VOID
+VtdLibFlushWriteBuffer (
+  IN UINTN                      VtdUnitBaseAddress
+  )
+{
+  VTD_CAP_REG                   Capability;
+  UINT32                        GlobalStatus;
+
+  Capability.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_CAP_REG);
+  if (Capability.Bits.RWBF == 0) {
+    //
+    // No write buffer implemented - nothing to flush.
+    //
+    return;
+  }
+
+  //
+  // Request the flush, then poll until hardware clears the WBF status bit.
+  //
+  GlobalStatus = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  MmioWrite32 (VtdUnitBaseAddress + R_GCMD_REG, GlobalStatus | B_GMCD_REG_WBF);
+  do {
+    GlobalStatus = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  } while ((GlobalStatus & B_GSTS_REG_WBF) != 0);
+}
+
+/**
+  Clear the requested bit(s) in the Global Command Register and wait for the
+  matching bit(s) in the Global Status Register to drop.
+
+  The current GSTS_REG value is masked with 0x96FFFFFF first, so the
+  one-shot command bits are not re-triggered by the read-modify-write.
+
+  NOTE(review): with a multi-bit mask the poll exits as soon as any one bit
+  clears, and there is no timeout; confirm callers only pass a single bit.
+
+  @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+  @param[in] BitMask            Bit mask
+**/
+VOID
+VtdLibClearGlobalCommandRegisterBits (
+  IN UINTN                      VtdUnitBaseAddress,
+  IN UINT32                     BitMask
+  )
+{
+  UINT32                        GlobalStatus;
+  UINT32                        PersistentBits;
+  UINT32                        NewCommand;
+
+  GlobalStatus   = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  PersistentBits = GlobalStatus & 0x96FFFFFF;   // Reset the one-shot bits
+  NewCommand     = PersistentBits & (~BitMask);
+  MmioWrite32 (VtdUnitBaseAddress + R_GCMD_REG, NewCommand);
+
+  DEBUG ((DEBUG_INFO, "Clear GCMD_REG bits 0x%x.\n", BitMask));
+
+  //
+  // Poll on Status bit of Global status register to become zero
+  //
+  do {
+    GlobalStatus = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  } while ((GlobalStatus & BitMask) == BitMask);
+  DEBUG ((DEBUG_INFO, "GSTS_REG : 0x%08x \n", GlobalStatus));
+}
+
+/**
+  Set the requested bit(s) in the Global Command Register and wait for the
+  matching bit(s) in the Global Status Register to be reported set.
+
+  The current GSTS_REG value is masked with 0x96FFFFFF first, so the
+  one-shot command bits are not re-triggered by the read-modify-write.
+
+  NOTE(review): with a multi-bit mask the poll exits as soon as any one bit
+  is set, and there is no timeout; confirm callers only pass a single bit.
+
+  @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+  @param[in] BitMask            Bit mask
+**/
+VOID
+VtdLibSetGlobalCommandRegisterBits (
+  IN UINTN                      VtdUnitBaseAddress,
+  IN UINT32                     BitMask
+  )
+{
+  UINT32                        GlobalStatus;
+  UINT32                        PersistentBits;
+  UINT32                        NewCommand;
+
+  GlobalStatus   = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  PersistentBits = GlobalStatus & 0x96FFFFFF;   // Reset the one-shot bits
+  NewCommand     = PersistentBits | BitMask;
+  MmioWrite32 (VtdUnitBaseAddress + R_GCMD_REG, NewCommand);
+
+  DEBUG ((DEBUG_INFO, "Set GCMD_REG bits 0x%x.\n", BitMask));
+
+  //
+  // Poll on Status bit of Global status register to become not zero
+  //
+  do {
+    GlobalStatus = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  } while ((GlobalStatus & BitMask) == 0);
+  DEBUG ((DEBUG_INFO, "GSTS_REG : 0x%08x \n", GlobalStatus));
+}
+
+/**
+  Disable DMAR translation.
+
+  Sequence: flush the engine's write buffer, clear GCMD_REG.TE to stop
+  translation, latch a new root table pointer via SRTP, then zero
+  RTADDR_REG so no stale root table address remains.
+
+  NOTE(review): always returns EFI_SUCCESS; the polling helpers it calls
+  spin without a timeout.
+
+  @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+
+  @retval EFI_SUCCESS           DMAR translation is disabled.
+**/
+EFI_STATUS
+VtdLibDisableDmar (
+  IN UINTN                      VtdUnitBaseAddress
+  )
+{
+  UINT32                        Reg32;
+
+  DEBUG ((DEBUG_INFO, ">>>>>>DisableDmar() for engine [%x]\n", VtdUnitBaseAddress));
+
+  //
+  // Write Buffer Flush before invalidation
+  //
+  VtdLibFlushWriteBuffer (VtdUnitBaseAddress);
+
+  //
+  // Disable Dmar
+  //
+  //
+  // Set TE (Translation Enable: BIT31) of Global command register to zero
+  //
+  VtdLibClearGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_TE);
+
+  //
+  // Set SRTP (Set Root Table Pointer: BIT30) of Global command register in
+  // order to update the root table pointer before VTd is disabled.
+  //
+  VtdLibSetGlobalCommandRegisterBits (VtdUnitBaseAddress, B_GMCD_REG_SRTP);
+
+  Reg32 = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  DEBUG ((DEBUG_INFO, "DisableDmar: GSTS_REG - 0x%08x\n", Reg32));
+
+  //
+  // Clear the root table address register now that translation is off.
+  //
+  MmioWrite64 (VtdUnitBaseAddress + R_RTADDR_REG, 0);
+
+  DEBUG ((DEBUG_INFO,"VTD () Disabled!<<<<<<\n"));
+
+  return EFI_SUCCESS;
+}
+
+/**
+  Disable PMR (protected memory regions) on one VTd engine.
+
+  The engine must implement both the low (PLMR) and high (PHMR) protected
+  memory ranges; otherwise PMR is reported as unsupported.  When PMR is
+  currently enabled, the enable bit is cleared and the call waits for the
+  hardware to acknowledge.
+
+  @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+
+  @retval EFI_SUCCESS           PMR is disabled.
+  @retval EFI_UNSUPPORTED       PMR is not supported.
+  @retval EFI_NOT_STARTED       PMR was not enabled.
+**/
+EFI_STATUS
+VtdLibDisablePmr (
+  IN UINTN                      VtdUnitBaseAddress
+  )
+{
+  VTD_CAP_REG                   Capability;
+  UINT32                        PmenReg;
+
+  Capability.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_CAP_REG);
+  if ((Capability.Bits.PLMR == 0) || (Capability.Bits.PHMR == 0)) {
+    //
+    // PMR is not supported
+    //
+    return EFI_UNSUPPORTED;
+  }
+
+  PmenReg = MmioRead32 (VtdUnitBaseAddress + R_PMEN_ENABLE_REG);
+  if ((PmenReg & BIT0) == 0) {
+    DEBUG ((DEBUG_INFO,"Pmr [0x%016lx] not enabled\n", VtdUnitBaseAddress));
+    return EFI_NOT_STARTED;
+  }
+
+  //
+  // Clear the enable bit and wait for the hardware to acknowledge.
+  //
+  MmioWrite32 (VtdUnitBaseAddress + R_PMEN_ENABLE_REG, 0x0);
+  do {
+    PmenReg = MmioRead32 (VtdUnitBaseAddress + R_PMEN_ENABLE_REG);
+  } while ((PmenReg & BIT0) != 0);
+
+  DEBUG ((DEBUG_INFO,"Pmr [0x%016lx] disabled\n", VtdUnitBaseAddress));
+  return EFI_SUCCESS;
+}
+
+/**
+  Disable queued invalidation interface.
+
+  An Invalidation Wait Descriptor is submitted first so every command still
+  in the queue is drained, then GCMD_REG.QIE is cleared and the function
+  waits for GSTS_REG.QIES to drop before clearing the queue address register.
+
+  NOTE(review): the completion poll spins without a timeout.
+
+  @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+**/
+VOID
+VtdLibDisableQueuedInvalidationInterface (
+  IN UINTN                      VtdUnitBaseAddress
+  )
+{
+  UINT32                        Reg32;
+  QI_256_DESC                   QiDesc;
+
+  //
+  // Drain the queue: submit a wait descriptor and block on its completion.
+  //
+  QiDesc.Uint64[0] = QI_IWD_TYPE;
+  QiDesc.Uint64[1] = 0;
+  QiDesc.Uint64[2] = 0;
+  QiDesc.Uint64[3] = 0;
+
+  VtdLibSubmitQueuedInvalidationDescriptor (VtdUnitBaseAddress, &QiDesc, TRUE);
+
+  //
+  // Mask off the one-shot command bits (0x96FFFFFF) before the
+  // read-modify-write, matching VtdLibClearGlobalCommandRegisterBits().
+  // Without the mask, latched one-shot status bits (e.g. SRTP) read back
+  // from GSTS_REG would be written to GCMD_REG and re-triggered.
+  //
+  Reg32 = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  Reg32 &= 0x96FFFFFF;
+  Reg32 &= (~B_GMCD_REG_QIE);
+  MmioWrite32 (VtdUnitBaseAddress + R_GCMD_REG, Reg32);
+
+  DEBUG ((DEBUG_INFO, "Disable Queued Invalidation Interface. [%x] GCMD_REG = 0x%x\n", VtdUnitBaseAddress, Reg32));
+  do {
+    Reg32 = MmioRead32 (VtdUnitBaseAddress + R_GSTS_REG);
+  } while ((Reg32 & B_GSTS_REG_QIES) != 0);
+
+  //
+  // Clear the invalidation queue address once the interface is off.
+  //
+  MmioWrite64 (VtdUnitBaseAddress + R_IQA_REG, 0);
+}
+
+/**
+  Submit the queued invalidation descriptor to the remapping
+  hardware unit and wait for its completion.
+
+  IQA_REG.DW (BIT11) selects between the 128-bit and 256-bit descriptor
+  layouts; the two layouts also encode the queue index in different bit
+  fields of IQT_REG/IQH_REG (bits [18:4] vs bits [18:5]).
+
+  NOTE(review): the completion loop only exits on head==tail or on a
+  reported fault; there is no timeout.
+
+  @param[in] VtdUnitBaseAddress The base address of the VTd engine.
+  @param[in] Desc               The invalidate descriptor
+  @param[in] ClearFaultBits     Clear Error bits
+
+  @retval EFI_SUCCESS           The operation was successful.
+  @retval EFI_INVALID_PARAMETER Parameter is invalid.
+  @retval EFI_NOT_READY         Queued invalidation is not inited.
+  @retval EFI_DEVICE_ERROR      Detect fault, need to clear fault bits if ClearFaultBits is FALSE
+
+**/
+EFI_STATUS
+VtdLibSubmitQueuedInvalidationDescriptor (
+  IN UINTN                      VtdUnitBaseAddress,
+  IN VOID                       *Desc,
+  IN BOOLEAN                    ClearFaultBits
+  )
+{
+  UINTN                         QueueSize;
+  UINTN                         QueueTail;
+  UINTN                         QueueHead;
+  QI_DESC                       *Qi128Desc;
+  QI_256_DESC                   *Qi256Desc;
+  VTD_IQA_REG                   IqaReg;
+  VTD_IQT_REG                   IqtReg;
+  VTD_IQH_REG                   IqhReg;
+  UINT32                        FaultReg;
+  UINT64                        IqercdReg;
+  UINT64                        IqBaseAddress;
+
+  if (Desc == NULL) {
+    return EFI_INVALID_PARAMETER;
+  }
+
+  IqaReg.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_IQA_REG);
+  //
+  // Get IQA_REG.IQA (Invalidation Queue Base Address)
+  //
+  IqBaseAddress = RShiftU64 (IqaReg.Uint64, 12);
+  if (IqBaseAddress == 0) {
+    DEBUG ((DEBUG_ERROR,"Invalidation Queue Buffer not ready [0x%lx]\n", IqaReg.Uint64));
+    return EFI_NOT_READY;
+  }
+  IqtReg.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_IQT_REG);
+
+  //
+  // Check IQA_REG.DW (Descriptor Width)
+  //
+  if ((IqaReg.Uint64 & BIT11) == 0) {
+    //
+    // 128-bit descriptor: queue holds 2^(QS+8) entries.
+    //
+    QueueSize = (UINTN) (1 << (IqaReg.Bits.QS + 8));
+    Qi128Desc = (QI_DESC *) (UINTN) LShiftU64 (IqBaseAddress, VTD_PAGE_SHIFT);
+    //
+    // Get IQT_REG.QT for 128-bit descriptors
+    //
+    QueueTail = (UINTN) (RShiftU64 (IqtReg.Uint64, 4) & 0x7FFF);
+    Qi128Desc += QueueTail;
+    CopyMem (Qi128Desc, Desc, sizeof (QI_DESC));
+    QueueTail = (QueueTail + 1) % QueueSize;
+
+    DEBUG ((DEBUG_VERBOSE, "[0x%x] Submit QI Descriptor 0x%x [0x%016lx, 0x%016lx]\n",
+      VtdUnitBaseAddress,
+      QueueTail,
+      Qi128Desc->Low,
+      Qi128Desc->High));
+
+    //
+    // Clear then update the tail index (bits [18:4]).  Build the mask with
+    // LShiftU64: the previous "~(0x7FFF << 4)" was a 32-bit int expression
+    // that only cleared the right UINT64 bits through sign-extension.
+    //
+    IqtReg.Uint64 &= ~LShiftU64 (0x7FFF, 4);
+    IqtReg.Uint64 |= LShiftU64 (QueueTail, 4);
+  } else {
+    //
+    // 256-bit descriptor: queue holds 2^(QS+7) entries.
+    //
+    QueueSize = (UINTN) (1 << (IqaReg.Bits.QS + 7));
+    Qi256Desc = (QI_256_DESC *) (UINTN) LShiftU64 (IqBaseAddress, VTD_PAGE_SHIFT);
+    //
+    // Get IQT_REG.QT for 256-bit descriptors
+    //
+    QueueTail = (UINTN) (RShiftU64 (IqtReg.Uint64, 5) & 0x3FFF);
+    Qi256Desc += QueueTail;
+    CopyMem (Qi256Desc, Desc, sizeof (QI_256_DESC));
+    QueueTail = (QueueTail + 1) % QueueSize;
+
+    DEBUG ((DEBUG_VERBOSE, "[0x%x] Submit QI Descriptor 0x%x [0x%016lx, 0x%016lx, 0x%016lx, 0x%016lx]\n",
+      VtdUnitBaseAddress,
+      QueueTail,
+      Qi256Desc->Uint64[0],
+      Qi256Desc->Uint64[1],
+      Qi256Desc->Uint64[2],
+      Qi256Desc->Uint64[3]));
+
+    //
+    // Clear then update the tail index (bits [18:5]); see the 128-bit
+    // branch for why LShiftU64 is used to build the mask.
+    //
+    IqtReg.Uint64 &= ~LShiftU64 (0x3FFF, 5);
+    IqtReg.Uint64 |= LShiftU64 (QueueTail, 5);
+  }
+
+  //
+  // Update the HW tail register indicating the presence of new descriptors.
+  //
+  MmioWrite64 (VtdUnitBaseAddress + R_IQT_REG, IqtReg.Uint64);
+
+  do {
+    FaultReg = MmioRead32 (VtdUnitBaseAddress + R_FSTS_REG);
+    if ((FaultReg & (B_FSTS_REG_IQE | B_FSTS_REG_ITE | B_FSTS_REG_ICE)) != 0) {
+      IqercdReg = MmioRead64 (VtdUnitBaseAddress + R_IQERCD_REG);
+      DEBUG((DEBUG_ERROR, "BAR [0x%016lx] Detect Queue Invalidation Fault [0x%08x] - IQERCD [0x%016lx]\n", VtdUnitBaseAddress, FaultReg, IqercdReg));
+      if (ClearFaultBits) {
+        //
+        // Write-1-to-clear only the queued-invalidation fault bits.
+        //
+        FaultReg &= (B_FSTS_REG_IQE | B_FSTS_REG_ITE | B_FSTS_REG_ICE);
+        MmioWrite32 (VtdUnitBaseAddress + R_FSTS_REG, FaultReg);
+      }
+      return EFI_DEVICE_ERROR;
+    }
+
+    IqhReg.Uint64 = MmioRead64 (VtdUnitBaseAddress + R_IQH_REG);
+    //
+    // Check IQA_REG.DW (Descriptor Width) and get IQH_REG.QH
+    //
+    if ((IqaReg.Uint64 & BIT11) == 0) {
+      QueueHead = (UINTN) (RShiftU64 (IqhReg.Uint64, 4) & 0x7FFF);
+    } else {
+      QueueHead = (UINTN) (RShiftU64 (IqhReg.Uint64, 5) & 0x3FFF);
+    }
+  } while (QueueTail != QueueHead);
+
+  return EFI_SUCCESS;
+}
diff --git a/Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLib.inf b/Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLib.inf
new file mode 100644
index 000000000..0d6dff5fa
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLib.inf
@@ -0,0 +1,30 @@
+### @file
+# Component information file for Intel VTd function library.
+#
+# Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+#
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+###
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = IntelVTdPeiDxeLib
+ FILE_GUID = 6cd8b1ea-152d-4cc9-b9b1-f5c692ba63da
+ VERSION_STRING = 1.0
+ MODULE_TYPE = BASE
+ LIBRARY_CLASS = IntelVTdPeiDxeLib
+
+[LibraryClasses]
+ BaseLib
+ PrintLib
+ IoLib
+ CacheMaintenanceLib
+
+[Packages]
+ MdePkg/MdePkg.dec
+ MdeModulePkg/MdeModulePkg.dec
+ IntelSiliconPkg/IntelSiliconPkg.dec
+
+[Sources]
+ IntelVTdPeiDxeLib.c
diff --git a/Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLibExt.inf b/Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLibExt.inf
new file mode 100644
index 000000000..9a2b28e12
--- /dev/null
+++ b/Silicon/Intel/IntelSiliconPkg/Library/IntelVTdPeiDxeLib/IntelVTdPeiDxeLibExt.inf
@@ -0,0 +1,34 @@
+### @file
+# Component information file for Intel VTd function library.
+#
+# Copyright (c) 2023, Intel Corporation. All rights reserved.<BR>
+#
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+###
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = IntelVTdPeiDxeLib
+ FILE_GUID = 6fd8b3aa-852d-6ccA-b9b2-f5c692ba63ca
+ VERSION_STRING = 1.0
+ MODULE_TYPE = BASE
+ LIBRARY_CLASS = IntelVTdPeiDxeLib
+
+[LibraryClasses]
+ BaseLib
+ PrintLib
+ IoLib
+ CacheMaintenanceLib
+
+[Packages]
+ MdePkg/MdePkg.dec
+ MdeModulePkg/MdeModulePkg.dec
+ IntelSiliconPkg/IntelSiliconPkg.dec
+
+[Sources]
+ IntelVTdPeiDxeLib.c
+
+[BuildOptions]
+ *_*_X64_CC_FLAGS = -DEXT_CALLBACK
+
--
2.26.2.windows.1
^ permalink raw reply related [flat|nested] 2+ messages in thread
end of thread, other threads:[~2023-05-23 8:13 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-03-02 6:10 [PATCH] IntelSiliconPkg/Vtd: Add Vtd core drivers Sheng Wei
-- strict thread matches above, loose matches on Subject: below --
2023-05-23 8:11 Sheng Wei
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox