* [PATCH v3 0/2] ArmPkg/ArmMmuLib: rewrite and improve cache handling with MMU off
@ 2020-03-06 16:12 Ard Biesheuvel
2020-03-06 16:12 ` [PATCH v3 1/2] ArmPkg/ArmMmuLib AARCH64: rewrite page table code Ard Biesheuvel
2020-03-06 16:12 ` [PATCH v3 2/2] ArmPkg/ArmMmuLib AARCH64: invalidate page tables before populating them Ard Biesheuvel
0 siblings, 2 replies; 6+ messages in thread
From: Ard Biesheuvel @ 2020-03-06 16:12 UTC (permalink / raw)
To: devel; +Cc: leif, Ard Biesheuvel
I finally got fed up with the state of the AArch64 MMU handling code, and
decided to rewrite it before rebasing the cache invalidation fix onto
it.
Ard Biesheuvel (2):
ArmPkg/ArmMmuLib AARCH64: rewrite page table code
ArmPkg/ArmMmuLib AARCH64: invalidate page tables before populating
them
.../Library/ArmMmuLib/AArch64/ArmMmuLibCore.c | 344 ++++++------------
1 file changed, 121 insertions(+), 223 deletions(-)
--
2.17.1
^ permalink raw reply [flat|nested] 6+ messages in thread
* [PATCH v3 1/2] ArmPkg/ArmMmuLib AARCH64: rewrite page table code
2020-03-06 16:12 [PATCH v3 0/2] ArmPkg/ArmMmuLib: rewrite and improve cache handling with MMU off Ard Biesheuvel
@ 2020-03-06 16:12 ` Ard Biesheuvel
2020-03-06 18:50 ` Leif Lindholm
2020-03-06 16:12 ` [PATCH v3 2/2] ArmPkg/ArmMmuLib AARCH64: invalidate page tables before populating them Ard Biesheuvel
1 sibling, 1 reply; 6+ messages in thread
From: Ard Biesheuvel @ 2020-03-06 16:12 UTC (permalink / raw)
To: devel; +Cc: leif, Ard Biesheuvel
Replace the slightly overcomplicated page table management code with
a simplified, recursive implementation that should be far easier to
reason about.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c | 332 +++++++-------------
1 file changed, 108 insertions(+), 224 deletions(-)
diff --git a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
index 204e33c75f95..e36594fea3ad 100644
--- a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
+++ b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
@@ -121,14 +121,16 @@ GetRootTranslationTableInfo (
STATIC
VOID
-ReplaceLiveEntry (
+ReplaceTableEntry (
IN UINT64 *Entry,
IN UINT64 Value,
- IN UINT64 RegionStart
+ IN UINT64 RegionStart,
+ IN BOOLEAN IsLiveBlockMapping
)
{
- if (!ArmMmuEnabled ()) {
+ if (!ArmMmuEnabled () || !IsLiveBlockMapping) {
*Entry = Value;
+ ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
} else {
ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
}
@@ -165,229 +167,132 @@ LookupAddresstoRootTable (
}
STATIC
-UINT64*
-GetBlockEntryListFromAddress (
- IN UINT64 *RootTable,
- IN UINT64 RegionStart,
- OUT UINTN *TableLevel,
- IN OUT UINT64 *BlockEntrySize,
- OUT UINT64 **LastBlockEntry
+EFI_STATUS
+UpdateRegionMappingRec (
+ IN UINT64 RegionStart,
+ IN UINT64 RegionEnd,
+ IN UINT64 AttributeSetMask,
+ IN UINT64 AttributeClearMask,
+ IN UINT64 *PageTable,
+ IN UINTN Level
)
{
- UINTN RootTableLevel;
- UINTN RootTableEntryCount;
- UINT64 *TranslationTable;
- UINT64 *BlockEntry;
- UINT64 *SubTableBlockEntry;
- UINT64 BlockEntryAddress;
- UINTN BaseAddressAlignment;
- UINTN PageLevel;
- UINTN Index;
- UINTN IndexLevel;
- UINTN T0SZ;
- UINT64 Attributes;
- UINT64 TableAttributes;
-
- // Initialize variable
- BlockEntry = NULL;
-
- // Ensure the parameters are valid
- if (!(TableLevel && BlockEntrySize && LastBlockEntry)) {
- ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
- return NULL;
- }
-
- // Ensure the Region is aligned on 4KB boundary
- if ((RegionStart & (SIZE_4KB - 1)) != 0) {
- ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
- return NULL;
- }
-
- // Ensure the required size is aligned on 4KB boundary and not 0
- if ((*BlockEntrySize & (SIZE_4KB - 1)) != 0 || *BlockEntrySize == 0) {
- ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
- return NULL;
- }
-
- T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
- // Get the Table info from T0SZ
- GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);
-
- // If the start address is 0x0 then we use the size of the region to identify the alignment
- if (RegionStart == 0) {
- // Identify the highest possible alignment for the Region Size
- BaseAddressAlignment = LowBitSet64 (*BlockEntrySize);
- } else {
- // Identify the highest possible alignment for the Base Address
- BaseAddressAlignment = LowBitSet64 (RegionStart);
- }
-
- // Identify the Page Level the RegionStart must belong to. Note that PageLevel
- // should be at least 1 since block translations are not supported at level 0
- PageLevel = MAX (3 - ((BaseAddressAlignment - 12) / 9), 1);
-
- // If the required size is smaller than the current block size then we need to go to the page below.
- // The PageLevel was calculated on the Base Address alignment but did not take in account the alignment
- // of the allocation size
- while (*BlockEntrySize < TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel)) {
- // It does not fit so we need to go a page level above
- PageLevel++;
- }
-
- //
- // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
- //
-
- TranslationTable = RootTable;
- for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
- BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);
-
- if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
- // Go to the next table
- TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
-
- // If we are at the last level then update the last level to next level
- if (IndexLevel == PageLevel) {
- // Enter the next level
- PageLevel++;
- }
- } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
- // If we are not at the last level then we need to split this BlockEntry
- if (IndexLevel != PageLevel) {
- // Retrieve the attributes from the block entry
- Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;
-
- // Convert the block entry attributes into Table descriptor attributes
- TableAttributes = TT_TABLE_AP_NO_PERMISSION;
- if (Attributes & TT_NS) {
- TableAttributes = TT_TABLE_NS;
+ UINTN BlockShift;
+ UINT64 BlockMask;
+ UINT64 BlockEnd;
+ UINT64 *Entry;
+ UINT64 EntryValue;
+ VOID *TranslationTable;
+ EFI_STATUS Status;
+
+ ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);
+
+ BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
+ BlockMask = MAX_UINT64 >> BlockShift;
+
+ DEBUG ((DEBUG_VERBOSE, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__,
+ Level, RegionStart, RegionEnd, AttributeSetMask, AttributeClearMask));
+
+ for (; RegionStart < RegionEnd; RegionStart = BlockEnd) {
+ BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
+ Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];
+
+ //
+ // If RegionStart or BlockEnd is not aligned to the block size at this
+ // level, we will have to create a table mapping in order to map less
+ // than a block, and recurse to create the block or page entries at
+ // the next level. No block mappings are allowed at all at level 0,
+ // so in that case, we have to recurse unconditionally.
+ //
+ if (Level == 0 || ((RegionStart | BlockEnd) & BlockMask) != 0) {
+ ASSERT (Level < 3);
+
+ if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) {
+ //
+ // No table entry exists yet, so we need to allocate a page table
+ // for the next level.
+ //
+ TranslationTable = AllocatePages (1);
+ if (TranslationTable == NULL) {
+ return EFI_OUT_OF_RESOURCES;
}
- // Get the address corresponding at this entry
- BlockEntryAddress = RegionStart;
- BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
- // Shift back to right to set zero before the effective address
- BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
-
- // Set the correct entry type for the next page level
- if ((IndexLevel + 1) == 3) {
- Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
+ if ((*Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
+ //
+ // We are splitting an existing block entry, so we have to populate
+ // the new table with the attributes of the block entry it replaces.
+ //
+ Status = UpdateRegionMappingRec (
+ RegionStart & ~BlockMask,
+ (RegionStart | BlockMask) + 1,
+ *Entry & TT_ATTRIBUTES_MASK,
+ 0,
+ TranslationTable,
+ Level + 1);
+ if (EFI_ERROR (Status)) {
+ return Status;
+ }
} else {
- Attributes |= TT_TYPE_BLOCK_ENTRY;
+ ZeroMem (TranslationTable, EFI_PAGE_SIZE);
}
+ } else {
+ TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
+ }
- // Create a new translation table
- TranslationTable = AllocatePages (1);
- if (TranslationTable == NULL) {
- return NULL;
- }
-
- // Populate the newly created lower level table
- SubTableBlockEntry = TranslationTable;
- for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
- *SubTableBlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));
- SubTableBlockEntry++;
- }
+ //
+ // Recurse to the next level
+ //
+ Status = UpdateRegionMappingRec (
+ RegionStart,
+ BlockEnd,
+ AttributeSetMask,
+ AttributeClearMask,
+ TranslationTable,
+ Level + 1);
+ if (EFI_ERROR (Status)) {
+ return Status;
+ }
- // Fill the BlockEntry with the new TranslationTable
- ReplaceLiveEntry (BlockEntry,
- (UINTN)TranslationTable | TableAttributes | TT_TYPE_TABLE_ENTRY,
- RegionStart);
+ if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) {
+ ReplaceTableEntry (Entry,
+ (UINT64)TranslationTable | TT_TYPE_TABLE_ENTRY,
+ RegionStart,
+ (*Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY);
}
} else {
- if (IndexLevel != PageLevel) {
- //
- // Case when we have an Invalid Entry and we are at a page level above of the one targetted.
- //
+ EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask;
+ EntryValue |= RegionStart;
+ EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
+ : TT_TYPE_BLOCK_ENTRY;
- // Create a new translation table
- TranslationTable = AllocatePages (1);
- if (TranslationTable == NULL) {
- return NULL;
- }
-
- ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));
-
- // Fill the new BlockEntry with the TranslationTable
- *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
- }
+ ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);
}
}
-
- // Expose the found PageLevel to the caller
- *TableLevel = PageLevel;
-
- // Now, we have the Table Level we can get the Block Size associated to this table
- *BlockEntrySize = TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel);
-
- // The last block of the root table depends on the number of entry in this table,
- // otherwise it is always the (TT_ENTRY_COUNT - 1)th entry in the table.
- *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(TranslationTable,
- (PageLevel == RootTableLevel) ? RootTableEntryCount : TT_ENTRY_COUNT);
-
- return BlockEntry;
+ return EFI_SUCCESS;
}
STATIC
EFI_STATUS
UpdateRegionMapping (
- IN UINT64 *RootTable,
- IN UINT64 RegionStart,
- IN UINT64 RegionLength,
- IN UINT64 Attributes,
- IN UINT64 BlockEntryMask
+ IN UINT64 RegionStart,
+ IN UINT64 RegionSize,
+ IN UINT64 AttributeSetMask,
+ IN UINT64 AttributeClearMask
)
{
- UINT32 Type;
- UINT64 *BlockEntry;
- UINT64 *LastBlockEntry;
- UINT64 BlockEntrySize;
- UINTN TableLevel;
+ UINTN RootTableLevel;
+ UINTN T0SZ;
- // Ensure the Length is aligned on 4KB boundary
- if ((RegionLength == 0) || ((RegionLength & (SIZE_4KB - 1)) != 0)) {
- ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
+ if ((RegionStart & EFI_PAGE_MASK) != 0 || (RegionSize & EFI_PAGE_MASK) != 0) {
return EFI_INVALID_PARAMETER;
}
- do {
- // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
- // such as the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
- BlockEntrySize = RegionLength;
- BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
- if (BlockEntry == NULL) {
- // GetBlockEntryListFromAddress() return NULL when it fails to allocate new pages from the Translation Tables
- return EFI_OUT_OF_RESOURCES;
- }
+ T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
+ GetRootTranslationTableInfo (T0SZ, &RootTableLevel, NULL);
- if (TableLevel != 3) {
- Type = TT_TYPE_BLOCK_ENTRY;
- } else {
- Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
- }
-
- do {
- // Fill the Block Entry with attribute and output block address
- *BlockEntry &= BlockEntryMask;
- *BlockEntry |= (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;
-
- ArmUpdateTranslationTableEntry (BlockEntry, (VOID *)RegionStart);
-
- // Go to the next BlockEntry
- RegionStart += BlockEntrySize;
- RegionLength -= BlockEntrySize;
- BlockEntry++;
-
- // Break the inner loop when next block is a table
- // Rerun GetBlockEntryListFromAddress to avoid page table memory leak
- if (TableLevel != 3 && BlockEntry <= LastBlockEntry &&
- (*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
- break;
- }
- } while ((RegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
- } while (RegionLength != 0);
-
- return EFI_SUCCESS;
+ return UpdateRegionMappingRec (RegionStart, RegionStart + RegionSize,
+ AttributeSetMask, AttributeClearMask, ArmGetTTBR0BaseAddress (),
+ RootTableLevel);
}
STATIC
@@ -398,7 +303,6 @@ FillTranslationTable (
)
{
return UpdateRegionMapping (
- RootTable,
MemoryRegion->VirtualBase,
MemoryRegion->Length,
ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
@@ -455,8 +359,6 @@ ArmSetMemoryAttributes (
IN UINT64 Attributes
)
{
- EFI_STATUS Status;
- UINT64 *TranslationTable;
UINT64 PageAttributes;
UINT64 PageAttributeMask;
@@ -473,19 +375,11 @@ ArmSetMemoryAttributes (
TT_PXN_MASK | TT_XN_MASK);
}
- TranslationTable = ArmGetTTBR0BaseAddress ();
-
- Status = UpdateRegionMapping (
- TranslationTable,
+ return UpdateRegionMapping (
BaseAddress,
Length,
PageAttributes,
PageAttributeMask);
- if (EFI_ERROR (Status)) {
- return Status;
- }
-
- return EFI_SUCCESS;
}
STATIC
@@ -497,17 +391,7 @@ SetMemoryRegionAttribute (
IN UINT64 BlockEntryMask
)
{
- EFI_STATUS Status;
- UINT64 *RootTable;
-
- RootTable = ArmGetTTBR0BaseAddress ();
-
- Status = UpdateRegionMapping (RootTable, BaseAddress, Length, Attributes, BlockEntryMask);
- if (EFI_ERROR (Status)) {
- return Status;
- }
-
- return EFI_SUCCESS;
+ return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);
}
EFI_STATUS
--
2.17.1
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [PATCH v3 2/2] ArmPkg/ArmMmuLib AARCH64: invalidate page tables before populating them
2020-03-06 16:12 [PATCH v3 0/2] ArmPkg/ArmMmuLib: rewrite and improve cache handling with MMU off Ard Biesheuvel
2020-03-06 16:12 ` [PATCH v3 1/2] ArmPkg/ArmMmuLib AARCH64: rewrite page table code Ard Biesheuvel
@ 2020-03-06 16:12 ` Ard Biesheuvel
2020-03-06 18:51 ` Leif Lindholm
1 sibling, 1 reply; 6+ messages in thread
From: Ard Biesheuvel @ 2020-03-06 16:12 UTC (permalink / raw)
To: devel; +Cc: leif, Ard Biesheuvel
As it turns out, ARMv8 also permits accesses made with the MMU and
caches off to hit in the caches, so to ensure that any modifications
we make before enabling the MMU are visible afterwards as well, we
should invalidate page tables right after allocation like we do now on
ARM, if the MMU is still disabled at that point.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
index e36594fea3ad..10ca8bac6a3f 100644
--- a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
+++ b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
@@ -217,6 +217,14 @@ UpdateRegionMappingRec (
return EFI_OUT_OF_RESOURCES;
}
+ if (!ArmMmuEnabled ()) {
+ //
+ // Make sure we are not inadvertently hitting in the caches
+ // when populating the page tables.
+ //
+ InvalidateDataCacheRange (TranslationTable, EFI_PAGE_SIZE);
+ }
+
if ((*Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
//
// We are splitting an existing block entry, so we have to populate
@@ -581,6 +589,12 @@ ArmConfigureMmu (
*TranslationTableSize = RootTableEntryCount * sizeof(UINT64);
}
+ //
+ // Make sure we are not inadvertently hitting in the caches
+ // when populating the page tables.
+ //
+ InvalidateDataCacheRange (TranslationTable,
+ RootTableEntryCount * sizeof(UINT64));
ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));
TranslationTableAttribute = TT_ATTR_INDX_INVALID;
--
2.17.1
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH v3 1/2] ArmPkg/ArmMmuLib AARCH64: rewrite page table code
2020-03-06 16:12 ` [PATCH v3 1/2] ArmPkg/ArmMmuLib AARCH64: rewrite page table code Ard Biesheuvel
@ 2020-03-06 18:50 ` Leif Lindholm
2020-03-07 7:15 ` Ard Biesheuvel
0 siblings, 1 reply; 6+ messages in thread
From: Leif Lindholm @ 2020-03-06 18:50 UTC (permalink / raw)
To: Ard Biesheuvel; +Cc: devel
On Fri, Mar 06, 2020 at 17:12:45 +0100, Ard Biesheuvel wrote:
> Replace the slightly overcomplicated page table management code with
> a simplified, recursive implementation that should be far easier to
> reason about.
>
> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> ---
> ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c | 332 +++++++-------------
> 1 file changed, 108 insertions(+), 224 deletions(-)
>
> diff --git a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
> index 204e33c75f95..e36594fea3ad 100644
> --- a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
> +++ b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
> @@ -121,14 +121,16 @@ GetRootTranslationTableInfo (
>
> STATIC
> VOID
> -ReplaceLiveEntry (
> +ReplaceTableEntry (
> IN UINT64 *Entry,
> IN UINT64 Value,
> - IN UINT64 RegionStart
> + IN UINT64 RegionStart,
> + IN BOOLEAN IsLiveBlockMapping
> )
> {
> - if (!ArmMmuEnabled ()) {
> + if (!ArmMmuEnabled () || !IsLiveBlockMapping) {
> *Entry = Value;
> + ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
> } else {
> ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
> }
> @@ -165,229 +167,132 @@ LookupAddresstoRootTable (
> }
>
> STATIC
> -UINT64*
> -GetBlockEntryListFromAddress (
> - IN UINT64 *RootTable,
> - IN UINT64 RegionStart,
> - OUT UINTN *TableLevel,
> - IN OUT UINT64 *BlockEntrySize,
> - OUT UINT64 **LastBlockEntry
> +EFI_STATUS
> +UpdateRegionMappingRec (
What does Rec stand for? Record? Recursively?
Could it be written out?
> + IN UINT64 RegionStart,
> + IN UINT64 RegionEnd,
> + IN UINT64 AttributeSetMask,
> + IN UINT64 AttributeClearMask,
> + IN UINT64 *PageTable,
> + IN UINTN Level
This sets the scene for a spectacularly unhelpful diff.
Would you consider adding UpdateRegionMappingRec() *before*
LookupAddresstoRootTable() for the only purpose of working against the
shortcomings of the line diff algorithm? That is what I ended up doing
locally for the review.
> )
> {
> - UINTN RootTableLevel;
> - UINTN RootTableEntryCount;
> - UINT64 *TranslationTable;
> - UINT64 *BlockEntry;
> - UINT64 *SubTableBlockEntry;
> - UINT64 BlockEntryAddress;
> - UINTN BaseAddressAlignment;
> - UINTN PageLevel;
> - UINTN Index;
> - UINTN IndexLevel;
> - UINTN T0SZ;
> - UINT64 Attributes;
> - UINT64 TableAttributes;
> -
> - // Initialize variable
> - BlockEntry = NULL;
> -
> - // Ensure the parameters are valid
> - if (!(TableLevel && BlockEntrySize && LastBlockEntry)) {
> - ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
> - return NULL;
> - }
> -
> - // Ensure the Region is aligned on 4KB boundary
> - if ((RegionStart & (SIZE_4KB - 1)) != 0) {
> - ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
> - return NULL;
> - }
> -
> - // Ensure the required size is aligned on 4KB boundary and not 0
> - if ((*BlockEntrySize & (SIZE_4KB - 1)) != 0 || *BlockEntrySize == 0) {
> - ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
> - return NULL;
> - }
> -
> - T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
> - // Get the Table info from T0SZ
> - GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);
> -
> - // If the start address is 0x0 then we use the size of the region to identify the alignment
> - if (RegionStart == 0) {
> - // Identify the highest possible alignment for the Region Size
> - BaseAddressAlignment = LowBitSet64 (*BlockEntrySize);
> - } else {
> - // Identify the highest possible alignment for the Base Address
> - BaseAddressAlignment = LowBitSet64 (RegionStart);
> - }
> -
> - // Identify the Page Level the RegionStart must belong to. Note that PageLevel
> - // should be at least 1 since block translations are not supported at level 0
> - PageLevel = MAX (3 - ((BaseAddressAlignment - 12) / 9), 1);
> -
> - // If the required size is smaller than the current block size then we need to go to the page below.
> - // The PageLevel was calculated on the Base Address alignment but did not take in account the alignment
> - // of the allocation size
> - while (*BlockEntrySize < TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel)) {
> - // It does not fit so we need to go a page level above
> - PageLevel++;
> - }
> -
> - //
> - // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
> - //
> -
> - TranslationTable = RootTable;
> - for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
> - BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);
> -
> - if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
> - // Go to the next table
> - TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
> -
> - // If we are at the last level then update the last level to next level
> - if (IndexLevel == PageLevel) {
> - // Enter the next level
> - PageLevel++;
> - }
> - } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
> - // If we are not at the last level then we need to split this BlockEntry
> - if (IndexLevel != PageLevel) {
> - // Retrieve the attributes from the block entry
> - Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;
> -
> - // Convert the block entry attributes into Table descriptor attributes
> - TableAttributes = TT_TABLE_AP_NO_PERMISSION;
> - if (Attributes & TT_NS) {
> - TableAttributes = TT_TABLE_NS;
> + UINTN BlockShift;
> + UINT64 BlockMask;
> + UINT64 BlockEnd;
> + UINT64 *Entry;
> + UINT64 EntryValue;
> + VOID *TranslationTable;
> + EFI_STATUS Status;
> +
> + ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);
> +
> + BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
> + BlockMask = MAX_UINT64 >> BlockShift;
> +
> + DEBUG ((DEBUG_VERBOSE, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__,
> + Level, RegionStart, RegionEnd, AttributeSetMask, AttributeClearMask));
> +
> + for (; RegionStart < RegionEnd; RegionStart = BlockEnd) {
> + BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
> + Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];
> +
> + //
> + // If RegionStart or BlockEnd is not aligned to the block size at this
> + // level, we will have to create a table mapping in order to map less
> + // than a block, and recurse to create the block or page entries at
> + // the next level. No block mappings are allowed at all at level 0,
> + // so in that case, we have to recurse unconditionally.
> + //
> + if (Level == 0 || ((RegionStart | BlockEnd) & BlockMask) != 0) {
> + ASSERT (Level < 3);
> +
> + if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) {
> + //
> + // No table entry exists yet, so we need to allocate a page table
> + // for the next level.
> + //
> + TranslationTable = AllocatePages (1);
> + if (TranslationTable == NULL) {
> + return EFI_OUT_OF_RESOURCES;
> }
>
> - // Get the address corresponding at this entry
> - BlockEntryAddress = RegionStart;
> - BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
> - // Shift back to right to set zero before the effective address
> - BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
> -
> - // Set the correct entry type for the next page level
> - if ((IndexLevel + 1) == 3) {
> - Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
> + if ((*Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
> + //
> + // We are splitting an existing block entry, so we have to populate
> + // the new table with the attributes of the block entry it replaces.
> + //
> + Status = UpdateRegionMappingRec (
> + RegionStart & ~BlockMask,
> + (RegionStart | BlockMask) + 1,
> + *Entry & TT_ATTRIBUTES_MASK,
> + 0,
> + TranslationTable,
> + Level + 1);
> + if (EFI_ERROR (Status)) {
I realise an EFI_OUT_OF_RESOURCES return value here means we would be
in dire straits already, but it would be nice if we could free the
local TranslationTable instead of leaking all allocations through the
tree.
If not worth the effort (or impossible condition), would still be
worth an explicit comment here.
> + return Status;
> + }
> } else {
> - Attributes |= TT_TYPE_BLOCK_ENTRY;
> + ZeroMem (TranslationTable, EFI_PAGE_SIZE);
> }
> + } else {
> + TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
> + }
>
> - // Create a new translation table
> - TranslationTable = AllocatePages (1);
> - if (TranslationTable == NULL) {
> - return NULL;
> - }
> -
> - // Populate the newly created lower level table
> - SubTableBlockEntry = TranslationTable;
> - for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
> - *SubTableBlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));
> - SubTableBlockEntry++;
> - }
> + //
> + // Recurse to the next level
> + //
> + Status = UpdateRegionMappingRec (
> + RegionStart,
> + BlockEnd,
> + AttributeSetMask,
> + AttributeClearMask,
> + TranslationTable,
> + Level + 1);
> + if (EFI_ERROR (Status)) {
> + return Status;
> + }
>
> - // Fill the BlockEntry with the new TranslationTable
> - ReplaceLiveEntry (BlockEntry,
> - (UINTN)TranslationTable | TableAttributes | TT_TYPE_TABLE_ENTRY,
> - RegionStart);
> + if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) {
> + ReplaceTableEntry (Entry,
> + (UINT64)TranslationTable | TT_TYPE_TABLE_ENTRY,
> + RegionStart,
> + (*Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY);
> }
> } else {
> - if (IndexLevel != PageLevel) {
> - //
> - // Case when we have an Invalid Entry and we are at a page level above of the one targetted.
> - //
> + EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask;
> + EntryValue |= RegionStart;
> + EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
> + : TT_TYPE_BLOCK_ENTRY;
>
> - // Create a new translation table
> - TranslationTable = AllocatePages (1);
> - if (TranslationTable == NULL) {
> - return NULL;
> - }
> -
> - ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));
> -
> - // Fill the new BlockEntry with the TranslationTable
> - *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
> - }
> + ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);
> }
> }
> -
> - // Expose the found PageLevel to the caller
> - *TableLevel = PageLevel;
> -
> - // Now, we have the Table Level we can get the Block Size associated to this table
> - *BlockEntrySize = TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel);
> -
> - // The last block of the root table depends on the number of entry in this table,
> - // otherwise it is always the (TT_ENTRY_COUNT - 1)th entry in the table.
> - *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(TranslationTable,
> - (PageLevel == RootTableLevel) ? RootTableEntryCount : TT_ENTRY_COUNT);
> -
> - return BlockEntry;
> + return EFI_SUCCESS;
> }
>
> STATIC
> EFI_STATUS
> UpdateRegionMapping (
> - IN UINT64 *RootTable,
> - IN UINT64 RegionStart,
> - IN UINT64 RegionLength,
> - IN UINT64 Attributes,
> - IN UINT64 BlockEntryMask
> + IN UINT64 RegionStart,
> + IN UINT64 RegionSize,
> + IN UINT64 AttributeSetMask,
> + IN UINT64 AttributeClearMask
The new indentation here (which arguably is an improvement) sort of
masks the unchanged input RegionStart and the rename of
RegionLength->RegionSize. Since it doesn't help anything in this
series, could it be deferred (or broken out)?
> )
> {
> - UINT32 Type;
> - UINT64 *BlockEntry;
> - UINT64 *LastBlockEntry;
> - UINT64 BlockEntrySize;
> - UINTN TableLevel;
> + UINTN RootTableLevel;
> + UINTN T0SZ;
>
> - // Ensure the Length is aligned on 4KB boundary
> - if ((RegionLength == 0) || ((RegionLength & (SIZE_4KB - 1)) != 0)) {
> - ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
> + if ((RegionStart & EFI_PAGE_MASK) != 0 || (RegionSize & EFI_PAGE_MASK) != 0) {
> return EFI_INVALID_PARAMETER;
> }
>
> - do {
> - // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
> - // such as the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
> - BlockEntrySize = RegionLength;
> - BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
> - if (BlockEntry == NULL) {
> - // GetBlockEntryListFromAddress() return NULL when it fails to allocate new pages from the Translation Tables
> - return EFI_OUT_OF_RESOURCES;
> - }
> + T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
> + GetRootTranslationTableInfo (T0SZ, &RootTableLevel, NULL);
>
> - if (TableLevel != 3) {
> - Type = TT_TYPE_BLOCK_ENTRY;
> - } else {
> - Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
> - }
> -
> - do {
> - // Fill the Block Entry with attribute and output block address
> - *BlockEntry &= BlockEntryMask;
> - *BlockEntry |= (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;
> -
> - ArmUpdateTranslationTableEntry (BlockEntry, (VOID *)RegionStart);
> -
> - // Go to the next BlockEntry
> - RegionStart += BlockEntrySize;
> - RegionLength -= BlockEntrySize;
> - BlockEntry++;
> -
> - // Break the inner loop when next block is a table
> - // Rerun GetBlockEntryListFromAddress to avoid page table memory leak
> - if (TableLevel != 3 && BlockEntry <= LastBlockEntry &&
> - (*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
> - break;
> - }
> - } while ((RegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
> - } while (RegionLength != 0);
> -
> - return EFI_SUCCESS;
> + return UpdateRegionMappingRec (RegionStart, RegionStart + RegionSize,
> + AttributeSetMask, AttributeClearMask, ArmGetTTBR0BaseAddress (),
> + RootTableLevel);
> }
>
> STATIC
> @@ -398,7 +303,6 @@ FillTranslationTable (
> )
> {
> return UpdateRegionMapping (
> - RootTable,
> MemoryRegion->VirtualBase,
> MemoryRegion->Length,
> ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
> @@ -455,8 +359,6 @@ ArmSetMemoryAttributes (
> IN UINT64 Attributes
> )
> {
> - EFI_STATUS Status;
> - UINT64 *TranslationTable;
> UINT64 PageAttributes;
> UINT64 PageAttributeMask;
>
> @@ -473,19 +375,11 @@ ArmSetMemoryAttributes (
> TT_PXN_MASK | TT_XN_MASK);
> }
>
> - TranslationTable = ArmGetTTBR0BaseAddress ();
> -
> - Status = UpdateRegionMapping (
> - TranslationTable,
> + return UpdateRegionMapping (
> BaseAddress,
> Length,
> PageAttributes,
> PageAttributeMask);
While making a neat and clear diff, this messes up coding style.
/
Leif
> - if (EFI_ERROR (Status)) {
> - return Status;
> - }
> -
> - return EFI_SUCCESS;
> }
>
> STATIC
> @@ -497,17 +391,7 @@ SetMemoryRegionAttribute (
> IN UINT64 BlockEntryMask
> )
> {
> - EFI_STATUS Status;
> - UINT64 *RootTable;
> -
> - RootTable = ArmGetTTBR0BaseAddress ();
> -
> - Status = UpdateRegionMapping (RootTable, BaseAddress, Length, Attributes, BlockEntryMask);
> - if (EFI_ERROR (Status)) {
> - return Status;
> - }
> -
> - return EFI_SUCCESS;
> + return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);
> }
>
> EFI_STATUS
> --
> 2.17.1
>
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH v3 2/2] ArmPkg/ArmMmuLib AARCH64: invalidate page tables before populating them
2020-03-06 16:12 ` [PATCH v3 2/2] ArmPkg/ArmMmuLib AARCH64: invalidate page tables before populating them Ard Biesheuvel
@ 2020-03-06 18:51 ` Leif Lindholm
0 siblings, 0 replies; 6+ messages in thread
From: Leif Lindholm @ 2020-03-06 18:51 UTC (permalink / raw)
To: Ard Biesheuvel; +Cc: devel
On Fri, Mar 06, 2020 at 17:12:46 +0100, Ard Biesheuvel wrote:
> As it turns out, ARMv8 also permits accesses made with the MMU and
> caches off to hit in the caches, so to ensure that any modifications
> we make before enabling the MMU are visible afterwards as well, we
> should invalidate page tables right after allocation like we do now on
> ARM, if the MMU is still disabled at that point.
>
> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Urgh.
Reviewed-by: Leif Lindholm <leif@nuviainc.com>
> ---
> ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c | 14 ++++++++++++++
> 1 file changed, 14 insertions(+)
>
> diff --git a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
> index e36594fea3ad..10ca8bac6a3f 100644
> --- a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
> +++ b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
> @@ -217,6 +217,14 @@ UpdateRegionMappingRec (
> return EFI_OUT_OF_RESOURCES;
> }
>
> + if (!ArmMmuEnabled ()) {
> + //
> + // Make sure we are not inadvertently hitting in the caches
> + // when populating the page tables.
> + //
> + InvalidateDataCacheRange (TranslationTable, EFI_PAGE_SIZE);
> + }
> +
> if ((*Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
> //
> // We are splitting an existing block entry, so we have to populate
> @@ -581,6 +589,12 @@ ArmConfigureMmu (
> *TranslationTableSize = RootTableEntryCount * sizeof(UINT64);
> }
>
> + //
> + // Make sure we are not inadvertently hitting in the caches
> + // when populating the page tables.
> + //
> + InvalidateDataCacheRange (TranslationTable,
> + RootTableEntryCount * sizeof(UINT64));
> ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));
>
> TranslationTableAttribute = TT_ATTR_INDX_INVALID;
> --
> 2.17.1
>
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH v3 1/2] ArmPkg/ArmMmuLib AARCH64: rewrite page table code
2020-03-06 18:50 ` Leif Lindholm
@ 2020-03-07 7:15 ` Ard Biesheuvel
0 siblings, 0 replies; 6+ messages in thread
From: Ard Biesheuvel @ 2020-03-07 7:15 UTC (permalink / raw)
To: Leif Lindholm; +Cc: edk2-devel-groups-io
On Fri, 6 Mar 2020 at 19:50, Leif Lindholm <leif@nuviainc.com> wrote:
>
> On Fri, Mar 06, 2020 at 17:12:45 +0100, Ard Biesheuvel wrote:
> > Replace the slightly overcomplicated page table management code with
> > a simplified, recursive implementation that should be far easier to
> > reason about.
> >
> > Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> > ---
> > ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c | 332 +++++++-------------
> > 1 file changed, 108 insertions(+), 224 deletions(-)
> >
> > diff --git a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
> > index 204e33c75f95..e36594fea3ad 100644
> > --- a/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
> > +++ b/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
> > @@ -121,14 +121,16 @@ GetRootTranslationTableInfo (
> >
> > STATIC
> > VOID
> > -ReplaceLiveEntry (
> > +ReplaceTableEntry (
> > IN UINT64 *Entry,
> > IN UINT64 Value,
> > - IN UINT64 RegionStart
> > + IN UINT64 RegionStart,
> > + IN BOOLEAN IsLiveBlockMapping
> > )
> > {
> > - if (!ArmMmuEnabled ()) {
> > + if (!ArmMmuEnabled () || !IsLiveBlockMapping) {
> > *Entry = Value;
> > + ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
> > } else {
> > ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
> > }
> > @@ -165,229 +167,132 @@ LookupAddresstoRootTable (
> > }
> >
> > STATIC
> > -UINT64*
> > -GetBlockEntryListFromAddress (
> > - IN UINT64 *RootTable,
> > - IN UINT64 RegionStart,
> > - OUT UINTN *TableLevel,
> > - IN OUT UINT64 *BlockEntrySize,
> > - OUT UINT64 **LastBlockEntry
> > +EFI_STATUS
> > +UpdateRegionMappingRec (
>
> What does Rec stand for? Record? Recursively?
> Could it be written out?
>
> > + IN UINT64 RegionStart,
> > + IN UINT64 RegionEnd,
> > + IN UINT64 AttributeSetMask,
> > + IN UINT64 AttributeClearMask,
> > + IN UINT64 *PageTable,
> > + IN UINTN Level
>
> This sets the scene for a spectacularly unhelpful diff.
> Would you consider adding UpdateRegionMappingRec() *before*
> LookupAddresstoRootTable() for the only purpose of working against the
> shortcomings of the line diff algorithm? That is what I ended up doing
> locally for the review.
>
Yeah works for me
> > )
> > {
> > - UINTN RootTableLevel;
> > - UINTN RootTableEntryCount;
> > - UINT64 *TranslationTable;
> > - UINT64 *BlockEntry;
> > - UINT64 *SubTableBlockEntry;
> > - UINT64 BlockEntryAddress;
> > - UINTN BaseAddressAlignment;
> > - UINTN PageLevel;
> > - UINTN Index;
> > - UINTN IndexLevel;
> > - UINTN T0SZ;
> > - UINT64 Attributes;
> > - UINT64 TableAttributes;
> > -
> > - // Initialize variable
> > - BlockEntry = NULL;
> > -
> > - // Ensure the parameters are valid
> > - if (!(TableLevel && BlockEntrySize && LastBlockEntry)) {
> > - ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
> > - return NULL;
> > - }
> > -
> > - // Ensure the Region is aligned on 4KB boundary
> > - if ((RegionStart & (SIZE_4KB - 1)) != 0) {
> > - ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
> > - return NULL;
> > - }
> > -
> > - // Ensure the required size is aligned on 4KB boundary and not 0
> > - if ((*BlockEntrySize & (SIZE_4KB - 1)) != 0 || *BlockEntrySize == 0) {
> > - ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
> > - return NULL;
> > - }
> > -
> > - T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
> > - // Get the Table info from T0SZ
> > - GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);
> > -
> > - // If the start address is 0x0 then we use the size of the region to identify the alignment
> > - if (RegionStart == 0) {
> > - // Identify the highest possible alignment for the Region Size
> > - BaseAddressAlignment = LowBitSet64 (*BlockEntrySize);
> > - } else {
> > - // Identify the highest possible alignment for the Base Address
> > - BaseAddressAlignment = LowBitSet64 (RegionStart);
> > - }
> > -
> > - // Identify the Page Level the RegionStart must belong to. Note that PageLevel
> > - // should be at least 1 since block translations are not supported at level 0
> > - PageLevel = MAX (3 - ((BaseAddressAlignment - 12) / 9), 1);
> > -
> > - // If the required size is smaller than the current block size then we need to go to the page below.
> > - // The PageLevel was calculated on the Base Address alignment but did not take in account the alignment
> > - // of the allocation size
> > - while (*BlockEntrySize < TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel)) {
> > - // It does not fit so we need to go a page level above
> > - PageLevel++;
> > - }
> > -
> > - //
> > - // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
> > - //
> > -
> > - TranslationTable = RootTable;
> > - for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
> > - BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);
> > -
> > - if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
> > - // Go to the next table
> > - TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
> > -
> > - // If we are at the last level then update the last level to next level
> > - if (IndexLevel == PageLevel) {
> > - // Enter the next level
> > - PageLevel++;
> > - }
> > - } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
> > - // If we are not at the last level then we need to split this BlockEntry
> > - if (IndexLevel != PageLevel) {
> > - // Retrieve the attributes from the block entry
> > - Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;
> > -
> > - // Convert the block entry attributes into Table descriptor attributes
> > - TableAttributes = TT_TABLE_AP_NO_PERMISSION;
> > - if (Attributes & TT_NS) {
> > - TableAttributes = TT_TABLE_NS;
> > + UINTN BlockShift;
> > + UINT64 BlockMask;
> > + UINT64 BlockEnd;
> > + UINT64 *Entry;
> > + UINT64 EntryValue;
> > + VOID *TranslationTable;
> > + EFI_STATUS Status;
> > +
> > + ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);
> > +
> > + BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
> > + BlockMask = MAX_UINT64 >> BlockShift;
> > +
> > + DEBUG ((DEBUG_VERBOSE, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__,
> > + Level, RegionStart, RegionEnd, AttributeSetMask, AttributeClearMask));
> > +
> > + for (; RegionStart < RegionEnd; RegionStart = BlockEnd) {
> > + BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
> > + Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];
> > +
> > + //
> > + // If RegionStart or BlockEnd is not aligned to the block size at this
> > + // level, we will have to create a table mapping in order to map less
> > + // than a block, and recurse to create the block or page entries at
> > + // the next level. No block mappings are allowed at all at level 0,
> > + // so in that case, we have to recurse unconditionally.
> > + //
> > + if (Level == 0 || ((RegionStart | BlockEnd) & BlockMask) != 0) {
> > + ASSERT (Level < 3);
> > +
> > + if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) {
> > + //
> > + // No table entry exists yet, so we need to allocate a page table
> > + // for the next level.
> > + //
> > + TranslationTable = AllocatePages (1);
> > + if (TranslationTable == NULL) {
> > + return EFI_OUT_OF_RESOURCES;
> > }
> >
> > - // Get the address corresponding at this entry
> > - BlockEntryAddress = RegionStart;
> > - BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
> > - // Shift back to right to set zero before the effective address
> > - BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
> > -
> > - // Set the correct entry type for the next page level
> > - if ((IndexLevel + 1) == 3) {
> > - Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
> > + if ((*Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
> > + //
> > + // We are splitting an existing block entry, so we have to populate
> > + // the new table with the attributes of the block entry it replaces.
> > + //
> > + Status = UpdateRegionMappingRec (
> > + RegionStart & ~BlockMask,
> > + (RegionStart | BlockMask) + 1,
> > + *Entry & TT_ATTRIBUTES_MASK,
> > + 0,
> > + TranslationTable,
> > + Level + 1);
> > + if (EFI_ERROR (Status)) {
>
> I realise a EFI_OUT_OF_RESOURCES return value here means we would be
> in dire straits already, but it would be nice if we could free the
> local TranslationTable instead of leaking all allocations through the
> tree.
>
> If not worth the effort (or impossible condition), would still be
> worth an explicit comment here.
>
In PEI, it wouldn't help since FreePages() is a NOP there. But this
library is also used by CpuDxe, so it does make sense.
And the nice thing about recursion is that it is much simpler to
reason about: we only need to free the page if it was allocated in the
context of the same call, and everything else comes out in the wash.
> > + return Status;
> > + }
> > } else {
> > - Attributes |= TT_TYPE_BLOCK_ENTRY;
> > + ZeroMem (TranslationTable, EFI_PAGE_SIZE);
> > }
> > + } else {
> > + TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
> > + }
> >
> > - // Create a new translation table
> > - TranslationTable = AllocatePages (1);
> > - if (TranslationTable == NULL) {
> > - return NULL;
> > - }
> > -
> > - // Populate the newly created lower level table
> > - SubTableBlockEntry = TranslationTable;
> > - for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
> > - *SubTableBlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));
> > - SubTableBlockEntry++;
> > - }
> > + //
> > + // Recurse to the next level
> > + //
> > + Status = UpdateRegionMappingRec (
> > + RegionStart,
> > + BlockEnd,
> > + AttributeSetMask,
> > + AttributeClearMask,
> > + TranslationTable,
> > + Level + 1);
> > + if (EFI_ERROR (Status)) {
> > + return Status;
> > + }
> >
> > - // Fill the BlockEntry with the new TranslationTable
> > - ReplaceLiveEntry (BlockEntry,
> > - (UINTN)TranslationTable | TableAttributes | TT_TYPE_TABLE_ENTRY,
> > - RegionStart);
> > + if ((*Entry & TT_TYPE_MASK) != TT_TYPE_TABLE_ENTRY) {
> > + ReplaceTableEntry (Entry,
> > + (UINT64)TranslationTable | TT_TYPE_TABLE_ENTRY,
> > + RegionStart,
> > + (*Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY);
> > }
> > } else {
> > - if (IndexLevel != PageLevel) {
> > - //
> > - // Case when we have an Invalid Entry and we are at a page level above of the one targetted.
> > - //
> > + EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask;
> > + EntryValue |= RegionStart;
> > + EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
> > + : TT_TYPE_BLOCK_ENTRY;
> >
> > - // Create a new translation table
> > - TranslationTable = AllocatePages (1);
> > - if (TranslationTable == NULL) {
> > - return NULL;
> > - }
> > -
> > - ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));
> > -
> > - // Fill the new BlockEntry with the TranslationTable
> > - *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
> > - }
> > + ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);
> > }
> > }
> > -
> > - // Expose the found PageLevel to the caller
> > - *TableLevel = PageLevel;
> > -
> > - // Now, we have the Table Level we can get the Block Size associated to this table
> > - *BlockEntrySize = TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel);
> > -
> > - // The last block of the root table depends on the number of entry in this table,
> > - // otherwise it is always the (TT_ENTRY_COUNT - 1)th entry in the table.
> > - *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(TranslationTable,
> > - (PageLevel == RootTableLevel) ? RootTableEntryCount : TT_ENTRY_COUNT);
> > -
> > - return BlockEntry;
> > + return EFI_SUCCESS;
> > }
> >
> > STATIC
> > EFI_STATUS
> > UpdateRegionMapping (
> > - IN UINT64 *RootTable,
> > - IN UINT64 RegionStart,
> > - IN UINT64 RegionLength,
> > - IN UINT64 Attributes,
> > - IN UINT64 BlockEntryMask
> > + IN UINT64 RegionStart,
> > + IN UINT64 RegionSize,
> > + IN UINT64 AttributeSetMask,
> > + IN UINT64 AttributeClearMask
>
> The new indentation here (which arguably is an improvement) sort of
> masks the unchanged input RegionStart and the rename of
> RegionLength->RegionSize. Since it doesn't help anything in this
> series, could it be deferred (or broken out)?
>
I'll revert this change for now (including the name change, since it is pointless)
> > )
> > {
> > - UINT32 Type;
> > - UINT64 *BlockEntry;
> > - UINT64 *LastBlockEntry;
> > - UINT64 BlockEntrySize;
> > - UINTN TableLevel;
> > + UINTN RootTableLevel;
> > + UINTN T0SZ;
> >
> > - // Ensure the Length is aligned on 4KB boundary
> > - if ((RegionLength == 0) || ((RegionLength & (SIZE_4KB - 1)) != 0)) {
> > - ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
> > + if ((RegionStart & EFI_PAGE_MASK) != 0 || (RegionSize & EFI_PAGE_MASK) != 0) {
> > return EFI_INVALID_PARAMETER;
> > }
> >
> > - do {
> > - // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
> > - // such as the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
> > - BlockEntrySize = RegionLength;
> > - BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
> > - if (BlockEntry == NULL) {
> > - // GetBlockEntryListFromAddress() return NULL when it fails to allocate new pages from the Translation Tables
> > - return EFI_OUT_OF_RESOURCES;
> > - }
> > + T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
> > + GetRootTranslationTableInfo (T0SZ, &RootTableLevel, NULL);
> >
> > - if (TableLevel != 3) {
> > - Type = TT_TYPE_BLOCK_ENTRY;
> > - } else {
> > - Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
> > - }
> > -
> > - do {
> > - // Fill the Block Entry with attribute and output block address
> > - *BlockEntry &= BlockEntryMask;
> > - *BlockEntry |= (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;
> > -
> > - ArmUpdateTranslationTableEntry (BlockEntry, (VOID *)RegionStart);
> > -
> > - // Go to the next BlockEntry
> > - RegionStart += BlockEntrySize;
> > - RegionLength -= BlockEntrySize;
> > - BlockEntry++;
> > -
> > - // Break the inner loop when next block is a table
> > - // Rerun GetBlockEntryListFromAddress to avoid page table memory leak
> > - if (TableLevel != 3 && BlockEntry <= LastBlockEntry &&
> > - (*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
> > - break;
> > - }
> > - } while ((RegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
> > - } while (RegionLength != 0);
> > -
> > - return EFI_SUCCESS;
> > + return UpdateRegionMappingRec (RegionStart, RegionStart + RegionSize,
> > + AttributeSetMask, AttributeClearMask, ArmGetTTBR0BaseAddress (),
> > + RootTableLevel);
> > }
> >
> > STATIC
> > @@ -398,7 +303,6 @@ FillTranslationTable (
> > )
> > {
> > return UpdateRegionMapping (
> > - RootTable,
> > MemoryRegion->VirtualBase,
> > MemoryRegion->Length,
> > ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
> > @@ -455,8 +359,6 @@ ArmSetMemoryAttributes (
> > IN UINT64 Attributes
> > )
> > {
> > - EFI_STATUS Status;
> > - UINT64 *TranslationTable;
> > UINT64 PageAttributes;
> > UINT64 PageAttributeMask;
> >
> > @@ -473,19 +375,11 @@ ArmSetMemoryAttributes (
> > TT_PXN_MASK | TT_XN_MASK);
> > }
> >
> > - TranslationTable = ArmGetTTBR0BaseAddress ();
> > -
> > - Status = UpdateRegionMapping (
> > - TranslationTable,
> > + return UpdateRegionMapping (
> > BaseAddress,
> > Length,
> > PageAttributes,
> > PageAttributeMask);
>
> While making a neat and clear diff, this messes up coding style.
>
Yup. Will fix.
> /
> Leif
>
> > - if (EFI_ERROR (Status)) {
> > - return Status;
> > - }
> > -
> > - return EFI_SUCCESS;
> > }
> >
> > STATIC
> > @@ -497,17 +391,7 @@ SetMemoryRegionAttribute (
> > IN UINT64 BlockEntryMask
> > )
> > {
> > - EFI_STATUS Status;
> > - UINT64 *RootTable;
> > -
> > - RootTable = ArmGetTTBR0BaseAddress ();
> > -
> > - Status = UpdateRegionMapping (RootTable, BaseAddress, Length, Attributes, BlockEntryMask);
> > - if (EFI_ERROR (Status)) {
> > - return Status;
> > - }
> > -
> > - return EFI_SUCCESS;
> > + return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);
> > }
> >
> > EFI_STATUS
> > --
> > 2.17.1
> >
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2020-03-07 7:15 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-03-06 16:12 [PATCH v3 0/2] ArmPkg/ArmMmuLib: rewrite and improve cache handling with MMU off Ard Biesheuvel
2020-03-06 16:12 ` [PATCH v3 1/2] ArmPkg/ArmMmuLib AARCH64: rewrite page table code Ard Biesheuvel
2020-03-06 18:50 ` Leif Lindholm
2020-03-07 7:15 ` Ard Biesheuvel
2020-03-06 16:12 ` [PATCH v3 2/2] ArmPkg/ArmMmuLib AARCH64: invalidate page tables before populating them Ard Biesheuvel
2020-03-06 18:51 ` Leif Lindholm
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox