From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received-SPF: Pass (sender SPF authorized) identity=mailfrom; client-ip=134.134.136.31; helo=mga06.intel.com; envelope-from=jian.j.wang@intel.com; receiver=edk2-devel@lists.01.org Received: from mga06.intel.com (mga06.intel.com [134.134.136.31]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by ml01.01.org (Postfix) with ESMTPS id 8AD92203564A6 for ; Wed, 29 Nov 2017 00:42:22 -0800 (PST) Received: from orsmga002.jf.intel.com ([10.7.209.21]) by orsmga104.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 29 Nov 2017 00:46:46 -0800 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.44,472,1505804400"; d="scan'208";a="13253653" Received: from jwang36-mobl2.ccr.corp.intel.com ([10.239.192.50]) by orsmga002.jf.intel.com with ESMTP; 29 Nov 2017 00:46:44 -0800 From: Jian J Wang To: edk2-devel@lists.01.org Cc: Jiewen Yao , Star Zeng , Eric Dong Date: Wed, 29 Nov 2017 16:46:40 +0800 Message-Id: <20171129084640.20076-3-jian.j.wang@intel.com> X-Mailer: git-send-email 2.14.1.windows.1 In-Reply-To: <20171129084640.20076-1-jian.j.wang@intel.com> References: <20171129084640.20076-1-jian.j.wang@intel.com> Subject: [PATCH 2/2] MdeModulePkg/DxeIpl: Mark page table as read-only X-BeenThere: edk2-devel@lists.01.org X-Mailman-Version: 2.1.22 Precedence: list List-Id: EDK II Development List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , X-List-Received-Date: Wed, 29 Nov 2017 08:42:22 -0000 This patch will set the memory pages used for page table as read-only memory after paging is set up. CR0.WP must be set for it to take effect. 
Cc: Jiewen Yao Cc: Star Zeng Cc: Eric Dong Contributed-under: TianoCore Contribution Agreement 1.1 Signed-off-by: Jian J Wang --- MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c | 166 +++++++++++++++++++++++ MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.h | 14 ++ 2 files changed, 180 insertions(+) diff --git a/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c b/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c index 29b6205e88..7a859606c6 100644 --- a/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c +++ b/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c @@ -234,6 +234,166 @@ Split1GPageTo2M ( } } +/** + Set one page (4KB) of memory to be read-only. + + @param[in] PageTableBase Base address of page table (CR3). + @param[in] Address Start address of a page to be set as read-only. + +**/ +VOID +SetPageReadOnly ( + IN UINTN PageTableBase, + IN PHYSICAL_ADDRESS Address + ) +{ + UINTN Index; + UINTN Index1; + UINTN Index2; + UINTN Index3; + UINTN Index4; + UINT64 *L1PageTable; + UINT64 *L2PageTable; + UINT64 *L3PageTable; + UINT64 *L4PageTable; + UINT64 AddressEncMask; + PHYSICAL_ADDRESS PhysicalAddress; + + ASSERT (PageTableBase != 0); + + Index4 = ((UINTN)RShiftU64 (Address, PAGING_L4_ADDRESS_SHIFT)) & + PAGING_PAE_INDEX_MASK; + ASSERT (Index4 < PAGING_PML4E_NUMBER); + + Index3 = ((UINTN)Address >> PAGING_L3_ADDRESS_SHIFT) & PAGING_PAE_INDEX_MASK; + Index2 = ((UINTN)Address >> PAGING_L2_ADDRESS_SHIFT) & PAGING_PAE_INDEX_MASK; + Index1 = ((UINTN)Address >> PAGING_L1_ADDRESS_SHIFT) & PAGING_PAE_INDEX_MASK; + + // + // Make sure AddressEncMask is contained to smallest supported address field. + // + AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & + PAGING_1G_ADDRESS_MASK_64; + + L4PageTable = (UINT64 *)(UINTN)PageTableBase; + L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~AddressEncMask & + PAGING_4K_ADDRESS_MASK_64); + if ((L3PageTable[Index3] & IA32_PG_PS) != 0) { + // 1G page. Split to 2M. 
+ L2PageTable = AllocatePages (1); + ASSERT (L2PageTable != NULL); + + PhysicalAddress = L3PageTable[Index3] & PAGING_1G_ADDRESS_MASK_64; + for (Index = 0; Index < EFI_PAGE_SIZE/sizeof (UINT64); ++Index) { + L2PageTable[Index] = PhysicalAddress | AddressEncMask | + IA32_PG_PS | IA32_PG_P | IA32_PG_RW; + PhysicalAddress += SIZE_2MB; + } + + L3PageTable[Index3] = (UINT64) (UINTN) L2PageTable | AddressEncMask | + IA32_PG_P | IA32_PG_RW; + SetPageReadOnly (PageTableBase, (EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable); + } + + L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~AddressEncMask & + PAGING_4K_ADDRESS_MASK_64); + if ((L2PageTable[Index2] & IA32_PG_PS) != 0) { + // 2M page. Split to 4K. + L1PageTable = AllocatePages (1); + ASSERT (L1PageTable != NULL); + + PhysicalAddress = L2PageTable[Index2] & PAGING_2M_ADDRESS_MASK_64; + for (Index = 0; Index < EFI_PAGE_SIZE/sizeof (UINT64); ++Index) { + L1PageTable[Index] = PhysicalAddress | AddressEncMask | + IA32_PG_P | IA32_PG_RW; + PhysicalAddress += SIZE_4KB; + } + + L2PageTable[Index2] = (UINT64)(UINTN)L1PageTable | AddressEncMask | + IA32_PG_P | IA32_PG_RW; + SetPageReadOnly (PageTableBase, (EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable); + } + + // 4k + L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~AddressEncMask & + PAGING_4K_ADDRESS_MASK_64); + L1PageTable[Index1] &= ~IA32_PG_RW; +} + +/** + Prevent the memory pages used for page table from being overwritten. + + @param[in] PageTableBase Base address of page table (CR3). + +**/ +VOID +EnablePageTableProtection ( + IN UINTN PageTableBase + ) +{ + UINTN Index2; + UINTN Index3; + UINTN Index4; + UINT64 *L1PageTable; + UINT64 *L2PageTable; + UINT64 *L3PageTable; + UINT64 *L4PageTable; + UINT64 AddressEncMask; + + // + // Disable write protection, because we need to mark page table to be write + // protected. 
+ // + AsmWriteCr0 (AsmReadCr0() & ~CR0_WP); + + AddressEncMask = PcdGet64 (PcdPteMemoryEncryptionAddressOrMask) & + PAGING_1G_ADDRESS_MASK_64; + L4PageTable = (UINT64 *)PageTableBase; + SetPageReadOnly (PageTableBase, (EFI_PHYSICAL_ADDRESS)(UINTN)L4PageTable); + + for (Index4 = 0; Index4 < PAGING_PML4E_NUMBER; Index4++) { + L3PageTable = (UINT64 *)(UINTN)(L4PageTable[Index4] & ~AddressEncMask & + PAGING_4K_ADDRESS_MASK_64); + if (L3PageTable == NULL) { + continue; + } + SetPageReadOnly (PageTableBase, (EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable); + + for (Index3 = 0; Index3 < EFI_PAGE_SIZE/sizeof(UINT64); Index3++) { + if ((L3PageTable[Index3] & IA32_PG_PS) != 0) { + // 1G + continue; + } + + L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~AddressEncMask & + PAGING_4K_ADDRESS_MASK_64); + if (L2PageTable == NULL) { + continue; + } + SetPageReadOnly (PageTableBase, (EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable); + + for (Index2 = 0; Index2 < EFI_PAGE_SIZE/sizeof(UINT64); Index2++) { + if ((L2PageTable[Index2] & IA32_PG_PS) != 0) { + // 2M + continue; + } + + L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~AddressEncMask & + PAGING_4K_ADDRESS_MASK_64); + if (L1PageTable == NULL) { + continue; + } + SetPageReadOnly (PageTableBase, (EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable); + } + } + } + + // + // Enable write protection, after page table updated. + // + AsmWriteCr0 (AsmReadCr0() | CR0_WP); +} + /** Allocates and fills in the Page Directory and Page Table Entries to establish a 1:1 Virtual to Physical mapping. @@ -430,6 +590,12 @@ CreateIdentityMappingPageTables ( ); } + // + // Protect the page table by marking the memory used for page table to be + // read-only. 
+ // + EnablePageTableProtection ((UINTN)PageMap); + if (PcdGetBool (PcdSetNxForStack)) { EnableExecuteDisableBit (); } diff --git a/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.h b/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.h index 7c9bb49e3e..6d1961b6f8 100644 --- a/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.h +++ b/MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.h @@ -148,11 +148,25 @@ typedef union { #pragma pack() +#define CR0_WP BIT16 + #define IA32_PG_P BIT0 #define IA32_PG_RW BIT1 +#define IA32_PG_PS BIT7 + +#define PAGING_PAE_INDEX_MASK 0x1FF +#define PAGING_4K_ADDRESS_MASK_64 0x000FFFFFFFFFF000ull +#define PAGING_2M_ADDRESS_MASK_64 0x000FFFFFFFE00000ull #define PAGING_1G_ADDRESS_MASK_64 0x000FFFFFC0000000ull +#define PAGING_L1_ADDRESS_SHIFT 12 +#define PAGING_L2_ADDRESS_SHIFT 21 +#define PAGING_L3_ADDRESS_SHIFT 30 +#define PAGING_L4_ADDRESS_SHIFT 39 + +#define PAGING_PML4E_NUMBER 4 + /** Enable Execute Disable Bit. -- 2.14.1.windows.1