From: "Boeuf, Sebastien" <sebastien.boeuf@intel.com>
To: devel@edk2.groups.io
Cc: jiewen.yao@intel.com, jordan.l.justen@intel.com,
	kraxel@redhat.com, sebastien.boeuf@intel.com
Subject: [PATCH v5 6/7] OvmfPkg: CloudHv: Rely on PVH memmap instead of CMOS
Date: Tue,  1 Mar 2022 14:29:16 +0100	[thread overview]
Message-ID: <c94df74f77be6bec9726c58535e91e86310c7ea9.1646141266.git.sebastien.boeuf@intel.com> (raw)
In-Reply-To: <cover.1646141266.git.sebastien.boeuf@intel.com>

From: Sebastien Boeuf <sebastien.boeuf@intel.com>

Instead of using the CMOS, the CloudHv platform relies on the list of
memmap entries provided through the PVH boot protocol to determine the
last RAM address below 4G.
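
For reference, the fields consumed from the PVH start of day structures
look roughly like this (field names are from the public Xen header
arch-x86/hvm/start_info.h, structure version 1; EDK2-style integer types
are used here only for readability, and the sketch is illustrative, not
part of the change):

  struct hvm_start_info {
    UINT32  magic;            /* XEN_HVM_START_MAGIC_VALUE                */
    UINT32  version;          /* memmap fields below need version >= 1    */
    /* ... */
    UINT64  memmap_paddr;     /* physical address of the memmap array     */
    UINT32  memmap_entries;   /* number of entries in that array          */
    UINT32  reserved;
  };

  struct hvm_memmap_table_entry {
    UINT64  addr;             /* base physical address of the region      */
    UINT64  size;             /* size of the region in bytes              */
    UINT32  type;             /* e.g. XEN_HVM_MEMMAP_TYPE_RAM             */
    UINT32  reserved;
  };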

Acked-by: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Sebastien Boeuf <sebastien.boeuf@intel.com>
---
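Note, not part of the commit: a minimal sketch of how the new helper could
be exercised from code in the same file, assuming the guest was started
via PVH so PcdXenPvhStartOfDayStructPtr has been populated by the reset
vector; the DEBUG print and the DumpPvhMemmap name are purely
illustrative.

  STATIC
  VOID
  DumpPvhMemmap (
    VOID
    )
  {
    struct hvm_memmap_table_entry  *Entries;
    UINT32                         Count;
    UINT32                         Index;

    if (EFI_ERROR (GetPvhMemmapEntries (&Entries, &Count))) {
      return;
    }

    for (Index = 0; Index < Count; Index++) {
      DEBUG ((
        DEBUG_INFO,
        "PVH memmap[%u]: 0x%Lx + 0x%Lx type %u\n",
        Index,
        Entries[Index].addr,
        Entries[Index].size,
        Entries[Index].type
        ));
    }
  }
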
 OvmfPkg/PlatformPei/MemDetect.c     | 73 +++++++++++++++++++++++++++++
 OvmfPkg/PlatformPei/PlatformPei.inf |  2 +
 2 files changed, 75 insertions(+)

diff --git a/OvmfPkg/PlatformPei/MemDetect.c b/OvmfPkg/PlatformPei/MemDetect.c
index 1bcb5a08bc..8ecc8257f9 100644
--- a/OvmfPkg/PlatformPei/MemDetect.c
+++ b/OvmfPkg/PlatformPei/MemDetect.c
@@ -17,6 +17,7 @@ Module Name:
 #include <IndustryStandard/I440FxPiix4.h>
 #include <IndustryStandard/Q35MchIch9.h>
 #include <IndustryStandard/CloudHv.h>
+#include <IndustryStandard/Xen/arch-x86/hvm/start_info.h>
 #include <PiPei.h>
 #include <Register/Intel/SmramSaveStateMap.h>
 
@@ -315,6 +316,73 @@ ScanOrAdd64BitE820Ram (
   return EFI_SUCCESS;
 }
 
+/**
+  Returns the PVH memmap.
+
+  @param Entries      Returns a pointer to the PVH memmap entry array
+  @param Count        Returns the number of entries in the array
+
+  @return EFI_SUCCESS on success, EFI_NOT_FOUND if the PVH pointer PCD is unset
+**/
+EFI_STATUS
+GetPvhMemmapEntries (
+  struct hvm_memmap_table_entry  **Entries,
+  UINT32                         *Count
+  )
+{
+  UINT32                 *PVHResetVectorData;
+  struct hvm_start_info  *pvh_start_info;
+
+  PVHResetVectorData = (VOID *)(UINTN)PcdGet32 (PcdXenPvhStartOfDayStructPtr);
+  if (PVHResetVectorData == 0) {
+    return EFI_NOT_FOUND;
+  }
+
+  pvh_start_info = (struct hvm_start_info *)(UINTN)PVHResetVectorData[0];
+
+  *Entries = (struct hvm_memmap_table_entry *)(UINTN)pvh_start_info->memmap_paddr;
+  *Count   = pvh_start_info->memmap_entries;
+
+  return EFI_SUCCESS;
+}
+
+STATIC
+UINT64
+GetHighestSystemMemoryAddressFromPvhMemmap (
+  BOOLEAN  Below4gb
+  )
+{
+  struct hvm_memmap_table_entry  *Memmap;
+  UINT32                         MemmapEntriesCount;
+  struct hvm_memmap_table_entry  *Entry;
+  EFI_STATUS                     Status;
+  UINT32                         Loop;
+  UINT64                         HighestAddress;
+  UINT64                         EntryEnd;
+
+  HighestAddress = 0;
+
+  Status = GetPvhMemmapEntries (&Memmap, &MemmapEntriesCount);
+  ASSERT_EFI_ERROR (Status);
+
+  for (Loop = 0; Loop < MemmapEntriesCount; Loop++) {
+    Entry    = Memmap + Loop;
+    EntryEnd = Entry->addr + Entry->size;
+
+    if ((Entry->type == XEN_HVM_MEMMAP_TYPE_RAM) &&
+        (EntryEnd > HighestAddress))
+    {
+      if (Below4gb && (EntryEnd <= BASE_4GB)) {
+        HighestAddress = EntryEnd;
+      } else if (!Below4gb && (EntryEnd >= BASE_4GB)) {
+        HighestAddress = EntryEnd;
+      }
+    }
+  }
+
+  return HighestAddress;
+}
+
 UINT32
 GetSystemMemorySizeBelow4gb (
   VOID
@@ -325,6 +393,11 @@ GetSystemMemorySizeBelow4gb (
   UINT8       Cmos0x34;
   UINT8       Cmos0x35;
 
+  if (mHostBridgeDevId == CLOUDHV_DEVICE_ID) {
+    // Get the information from PVH memmap
+    return (UINT32)GetHighestSystemMemoryAddressFromPvhMemmap (TRUE);
+  }
+
   Status = ScanOrAdd64BitE820Ram (FALSE, &LowerMemorySize, NULL);
   if ((Status == EFI_SUCCESS) && (LowerMemorySize > 0)) {
     return (UINT32)LowerMemorySize;
diff --git a/OvmfPkg/PlatformPei/PlatformPei.inf b/OvmfPkg/PlatformPei/PlatformPei.inf
index 8ef404168c..212aa7b047 100644
--- a/OvmfPkg/PlatformPei/PlatformPei.inf
+++ b/OvmfPkg/PlatformPei/PlatformPei.inf
@@ -91,6 +91,8 @@
   gUefiOvmfPkgTokenSpaceGuid.PcdOvmfDecompressionScratchEnd
   gUefiOvmfPkgTokenSpaceGuid.PcdQ35TsegMbytes
   gUefiOvmfPkgTokenSpaceGuid.PcdQ35SmramAtDefaultSmbase
+  gUefiOvmfPkgTokenSpaceGuid.PcdXenPvhStartOfDayStructPtr
+  gUefiOvmfPkgTokenSpaceGuid.PcdXenPvhStartOfDayStructPtrSize
   gEfiMdePkgTokenSpaceGuid.PcdGuidedExtractHandlerTableAddress
   gEfiMdeModulePkgTokenSpaceGuid.PcdFlashNvStorageFtwSpareSize
   gEfiMdeModulePkgTokenSpaceGuid.PcdFlashNvStorageVariableSize
-- 
2.32.0

Thread overview: 11+ messages
2022-03-01 13:29 [PATCH v5 0/7] CloudHv: Rely on PVH boot specification Boeuf, Sebastien
2022-03-01 13:29 ` [PATCH v5 1/7] OvmfPkg: Make the Xen ELF header generator more flexible Boeuf, Sebastien
2022-03-01 13:29 ` [PATCH v5 2/7] OvmfPkg: Xen: Use a new fdf include for the PVH ELF header Boeuf, Sebastien
2022-03-01 13:29 ` [PATCH v5 3/7] OvmfPkg: Xen: Generate fdf include file from ELF header generator Boeuf, Sebastien
2022-03-01 13:29 ` [PATCH v5 4/7] OvmfPkg: Generate CloudHv as a PVH ELF binary Boeuf, Sebastien
2022-03-02  7:17   ` [edk2-devel] " Gerd Hoffmann
2022-03-02 11:29     ` Boeuf, Sebastien
2022-03-01 13:29 ` [PATCH v5 5/7] OvmfPkg: CloudHv: Retrieve RSDP address from PVH Boeuf, Sebastien
2022-03-01 13:29 ` Boeuf, Sebastien [this message]
2022-03-01 13:29 ` [PATCH v5 7/7] OvmfPkg: CloudHv: Add README Boeuf, Sebastien
2022-03-02  7:17   ` Gerd Hoffmann
