From: "Yuanhao Xie" <yuanhao.xie@intel.com>
To: devel@edk2.groups.io
Cc: Guo Dong <guo.dong@intel.com>, Ray Ni <ray.ni@intel.com>,
	Sean Rhodes <sean@starlabs.systems>,
	James Lu <james.lu@intel.com>, Gua Guo <gua.guo@intel.com>
Subject: [PATCH 1/5] UefiCpuPkg: Duplicate RelocateApLoop for Amd x64 processors.
Date: Tue,  7 Feb 2023 21:49:35 +0800	[thread overview]
Message-ID: <20230207134939.273-2-yuanhao.xie@intel.com> (raw)
In-Reply-To: <20230207134939.273-1-yuanhao.xie@intel.com>

The duplicated variant keeps 64-bit AMD processors on the existing,
already-verified AP loop logic, so that modifications that have not been
tested on those processors cannot cause crashes there.

Cc: Guo Dong <guo.dong@intel.com>
Cc: Ray Ni <ray.ni@intel.com>
Cc: Sean Rhodes <sean@starlabs.systems>
Cc: James Lu <james.lu@intel.com>
Cc: Gua Guo <gua.guo@intel.com>
Signed-off-by: Yuanhao Xie <yuanhao.xie@intel.com>
---
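
Note for reviewers (kept below the "---" so it is not part of the commit
message): the new path is taken only when CPUID reports an AMD processor
and the build is 64-bit, i.e. the
"StandardSignatureIsAuthenticAMD () && (sizeof (UINTN) == sizeof (UINT64))"
condition used in the hunks below. As a rough user-space illustration of
what that check amounts to -- not code proposed for the tree, and using
the GCC/Clang <cpuid.h> helper instead of the edk2 BaseLib call:

    /* Illustration only: approximate the vendor/arch check from the patch. */
    #include <cpuid.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool
    IsAuthenticAmd (void)
    {
      unsigned int  Eax, Ebx, Ecx, Edx;
      char          Vendor[13];

      if (!__get_cpuid (0, &Eax, &Ebx, &Ecx, &Edx)) {
        return false;
      }

      /* CPUID leaf 0 returns the vendor string in EBX:EDX:ECX order. */
      memcpy (Vendor,     &Ebx, 4);
      memcpy (Vendor + 4, &Edx, 4);
      memcpy (Vendor + 8, &Ecx, 4);
      Vendor[12] = '\0';
      return strcmp (Vendor, "AuthenticAMD") == 0;
    }

    int
    main (void)
    {
      if (IsAuthenticAmd () && (sizeof (void *) == sizeof (unsigned long long))) {
        puts ("64-bit AMD: would use the duplicated AsmRelocateApLoopAmd64");
      } else {
        puts ("would keep using the existing AsmRelocateApLoop");
      }
      return 0;
    }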
 UefiCpuPkg/Library/MpInitLib/DxeMpLib.c       |  70 +++++---
 UefiCpuPkg/Library/MpInitLib/MpEqu.inc        |  20 ++-
 UefiCpuPkg/Library/MpInitLib/MpLib.h          |  28 +++
 UefiCpuPkg/Library/MpInitLib/X64/AmdSev.nasm  | 169 ++++++++++++++++++
 UefiCpuPkg/Library/MpInitLib/X64/MpFuncs.nasm |   3 +
 5 files changed, 256 insertions(+), 34 deletions(-)

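A second note, on two constants in the duplicated AsmRelocateApLoopAmd64
below. The arithmetic here only spells out what the assembly already
encodes (the MWAIT hint layout and the GHCB valid bitmap at offset 0x3f0);
it is an illustration, not new code for the tree:

    #include <stdio.h>

    int
    main (void)
    {
      unsigned int  ApTargetCState = 1;

      /* "shl eax, 4": MWAIT takes the target C-State in EAX[7:4]. */
      unsigned int  MwaitHint = (ApTargetCState & 0xF) << 4;

      /* "mov rax, 114": bit index into the GHCB valid bitmap for the
         SwExitCode field at offset 0x390 (0x390 / 8 == 114); SwExitInfo1
         and SwExitInfo2 follow at 115 and 116, hence the two "inc rax". */
      unsigned int  SwExitCodeValidBit = 0x390 / 8;

      printf ("MWAIT hint = 0x%02x, SwExitCode valid bit = %u\n",
              MwaitHint, SwExitCodeValidBit);
      return 0;
    }
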
diff --git a/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c b/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c
index a84e9e33ba..fd94652e5b 100644
--- a/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c
+++ b/UefiCpuPkg/Library/MpInitLib/DxeMpLib.c
@@ -1,7 +1,7 @@
 /** @file
   MP initialize support functions for DXE phase.
 
-  Copyright (c) 2016 - 2020, Intel Corporation. All rights reserved.<BR>
+  Copyright (c) 2016 - 2022, Intel Corporation. All rights reserved.<BR>
   SPDX-License-Identifier: BSD-2-Clause-Patent
 
 **/
@@ -378,32 +378,44 @@ RelocateApLoop (
   IN OUT VOID  *Buffer
   )
 {
-  CPU_MP_DATA           *CpuMpData;
-  BOOLEAN               MwaitSupport;
-  ASM_RELOCATE_AP_LOOP  AsmRelocateApLoopFunc;
-  UINTN                 ProcessorNumber;
-  UINTN                 StackStart;
+  CPU_MP_DATA                 *CpuMpData;
+  BOOLEAN                     MwaitSupport;
+  ASM_RELOCATE_AP_LOOP        AsmRelocateApLoopFunc;
+  ASM_RELOCATE_AP_LOOP_AMD64  AsmRelocateApLoopFuncAmd64;
+  UINTN                       ProcessorNumber;
+  UINTN                       StackStart;
 
   MpInitLibWhoAmI (&ProcessorNumber);
   CpuMpData    = GetCpuMpData ();
   MwaitSupport = IsMwaitSupport ();
-  if (CpuMpData->UseSevEsAPMethod) {
-    StackStart = CpuMpData->SevEsAPResetStackStart;
+  if (StandardSignatureIsAuthenticAMD () && (sizeof (UINTN) == sizeof (UINT64))) {
+    StackStart = CpuMpData->UseSevEsAPMethod ? CpuMpData->SevEsAPResetStackStart : mReservedTopOfApStack;
+    AsmRelocateApLoopFuncAmd64 = (ASM_RELOCATE_AP_LOOP_AMD64)(UINTN)mReservedApLoopFunc;
+    AsmRelocateApLoopFuncAmd64 (
+      MwaitSupport,
+      CpuMpData->ApTargetCState,
+      CpuMpData->PmCodeSegment,
+      StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
+      (UINTN)&mNumberToFinish,
+      CpuMpData->Pm16CodeSegment,
+      CpuMpData->SevEsAPBuffer,
+      CpuMpData->WakeupBuffer
+      );
   } else {
-    StackStart = mReservedTopOfApStack;
+    StackStart            = mReservedTopOfApStack;
+    AsmRelocateApLoopFunc = (ASM_RELOCATE_AP_LOOP)(UINTN)mReservedApLoopFunc;
+    AsmRelocateApLoopFunc (
+      MwaitSupport,
+      CpuMpData->ApTargetCState,
+      CpuMpData->PmCodeSegment,
+      StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
+      (UINTN)&mNumberToFinish,
+      CpuMpData->Pm16CodeSegment,
+      CpuMpData->SevEsAPBuffer,
+      CpuMpData->WakeupBuffer
+      );
   }
 
-  AsmRelocateApLoopFunc = (ASM_RELOCATE_AP_LOOP)(UINTN)mReservedApLoopFunc;
-  AsmRelocateApLoopFunc (
-    MwaitSupport,
-    CpuMpData->ApTargetCState,
-    CpuMpData->PmCodeSegment,
-    StackStart - ProcessorNumber * AP_SAFE_STACK_SIZE,
-    (UINTN)&mNumberToFinish,
-    CpuMpData->Pm16CodeSegment,
-    CpuMpData->SevEsAPBuffer,
-    CpuMpData->WakeupBuffer
-    );
   //
   // It should never reach here
   //
@@ -582,11 +594,19 @@ InitMpGlobalData (
 
   mReservedTopOfApStack = (UINTN)Address + ApSafeBufferSize;
   ASSERT ((mReservedTopOfApStack & (UINTN)(CPU_STACK_ALIGNMENT - 1)) == 0);
-  CopyMem (
-    mReservedApLoopFunc,
-    CpuMpData->AddressMap.RelocateApLoopFuncAddress,
-    CpuMpData->AddressMap.RelocateApLoopFuncSize
-    );
+  if (StandardSignatureIsAuthenticAMD () && (sizeof (UINTN) == sizeof (UINT64))) {
+    CopyMem (
+      mReservedApLoopFunc,
+      CpuMpData->AddressMap.RelocateApLoopFuncAddressAmd64,
+      CpuMpData->AddressMap.RelocateApLoopFuncSizeAmd64
+      );
+  } else {
+    CopyMem (
+      mReservedApLoopFunc,
+      CpuMpData->AddressMap.RelocateApLoopFuncAddress,
+      CpuMpData->AddressMap.RelocateApLoopFuncSize
+      );
+  }
 
   Status = gBS->CreateEvent (
                   EVT_TIMER | EVT_NOTIFY_SIGNAL,
diff --git a/UefiCpuPkg/Library/MpInitLib/MpEqu.inc b/UefiCpuPkg/Library/MpInitLib/MpEqu.inc
index ebadcc6fb3..4d0933d352 100644
--- a/UefiCpuPkg/Library/MpInitLib/MpEqu.inc
+++ b/UefiCpuPkg/Library/MpInitLib/MpEqu.inc
@@ -21,15 +21,17 @@ CPU_SWITCH_STATE_LOADED       equ        2
 ; Equivalent NASM structure of MP_ASSEMBLY_ADDRESS_MAP
 ;
 struc MP_ASSEMBLY_ADDRESS_MAP
-  .RendezvousFunnelAddress       CTYPE_UINTN 1
-  .ModeEntryOffset               CTYPE_UINTN 1
-  .RendezvousFunnelSize          CTYPE_UINTN 1
-  .RelocateApLoopFuncAddress     CTYPE_UINTN 1
-  .RelocateApLoopFuncSize        CTYPE_UINTN 1
-  .ModeTransitionOffset          CTYPE_UINTN 1
-  .SwitchToRealNoNxOffset        CTYPE_UINTN 1
-  .SwitchToRealPM16ModeOffset    CTYPE_UINTN 1
-  .SwitchToRealPM16ModeSize      CTYPE_UINTN 1
+  .RendezvousFunnelAddress          CTYPE_UINTN 1
+  .ModeEntryOffset                  CTYPE_UINTN 1
+  .RendezvousFunnelSize             CTYPE_UINTN 1
+  .RelocateApLoopFuncAddress        CTYPE_UINTN 1
+  .RelocateApLoopFuncSize           CTYPE_UINTN 1
+  .RelocateApLoopFuncAddressAmd64   CTYPE_UINTN 1
+  .RelocateApLoopFuncSizeAmd64      CTYPE_UINTN 1
+  .ModeTransitionOffset             CTYPE_UINTN 1
+  .SwitchToRealNoNxOffset           CTYPE_UINTN 1
+  .SwitchToRealPM16ModeOffset       CTYPE_UINTN 1
+  .SwitchToRealPM16ModeSize         CTYPE_UINTN 1
 endstruc
 
 ;
diff --git a/UefiCpuPkg/Library/MpInitLib/MpLib.h b/UefiCpuPkg/Library/MpInitLib/MpLib.h
index f5086e497e..4f6146e30d 100644
--- a/UefiCpuPkg/Library/MpInitLib/MpLib.h
+++ b/UefiCpuPkg/Library/MpInitLib/MpLib.h
@@ -179,6 +179,8 @@ typedef struct {
   UINTN    RendezvousFunnelSize;
   UINT8    *RelocateApLoopFuncAddress;
   UINTN    RelocateApLoopFuncSize;
+  UINT8    *RelocateApLoopFuncAddressAmd64;
+  UINTN    RelocateApLoopFuncSizeAmd64;
   UINTN    ModeTransitionOffset;
   UINTN    SwitchToRealNoNxOffset;
   UINTN    SwitchToRealPM16ModeOffset;
@@ -373,6 +375,32 @@ typedef
   IN UINTN                   WakeupBuffer
   );
 
+/**
+  Assembly code to place the AP into a safe loop on AMD x64 processors.
+  The AP is placed in the targeted C-State if MONITOR is supported,
+  otherwise it is placed in the hlt state.
+  The AP is switched to protected mode if it is currently in long mode,
+  because the AP may be woken by a hardware event and must not access
+  page tables that may no longer be available while the OS is booting.
+
+  @param[in] MwaitSupport    TRUE indicates MONITOR is supported.
+                             FALSE indicates MONITOR is not supported.
+  @param[in] ApTargetCState  Target C-State value.
+  @param[in] PmCodeSegment   Protected mode code segment value.
+**/
+typedef
+  VOID
+(EFIAPI *ASM_RELOCATE_AP_LOOP_AMD64)(
+  IN BOOLEAN                 MwaitSupport,
+  IN UINTN                   ApTargetCState,
+  IN UINTN                   PmCodeSegment,
+  IN UINTN                   TopOfApStack,
+  IN UINTN                   NumberToFinish,
+  IN UINTN                   Pm16CodeSegment,
+  IN UINTN                   SevEsAPJumpTable,
+  IN UINTN                   WakeupBuffer
+  );
+
 /**
   Assembly code to get starting address and size of the rendezvous entry for APs.
   Information for fixing a jump instruction in the code is also returned.
diff --git a/UefiCpuPkg/Library/MpInitLib/X64/AmdSev.nasm b/UefiCpuPkg/Library/MpInitLib/X64/AmdSev.nasm
index 7c2469f9c5..b3021bc6ea 100644
--- a/UefiCpuPkg/Library/MpInitLib/X64/AmdSev.nasm
+++ b/UefiCpuPkg/Library/MpInitLib/X64/AmdSev.nasm
@@ -346,3 +346,172 @@ PM16Mode:
     iret
 
 SwitchToRealProcEnd:
+;-------------------------------------------------------------------------------------
+;  AsmRelocateApLoopAmd64 (MwaitSupport, ApTargetCState, PmCodeSegment, TopOfApStack, CountTofinish, Pm16CodeSegment, SevEsAPJumpTable, WakeupBuffer);
+;-------------------------------------------------------------------------------------
+
+AsmRelocateApLoopStartAmd64:
+BITS 64
+    cmp        qword [rsp + 56], 0  ; SevEsAPJumpTable
+    je         NoSevEsAmd64
+
+    ;
+    ; Perform some SEV-ES related setup before leaving 64-bit mode
+    ;
+    push       rcx
+    push       rdx
+
+    ;
+    ; Get the RDX reset value using CPUID
+    ;
+    mov        rax, 1
+    cpuid
+    mov        rsi, rax          ; Save off the reset value for RDX
+
+    ;
+    ; Prepare the GHCB for the AP_HLT_LOOP VMGEXIT call
+    ;   - Must be done while in 64-bit long mode so that writes to
+    ;     the GHCB memory will be unencrypted.
+    ;   - No NAE events can be generated once this is set otherwise
+    ;     the AP_RESET_HOLD SW_EXITCODE will be overwritten.
+    ;
+    mov        rcx, 0xc0010130
+    rdmsr                        ; Retrieve current GHCB address
+    shl        rdx, 32
+    or         rdx, rax
+
+    mov        rdi, rdx
+    xor        rax, rax
+    mov        rcx, 0x800
+    shr        rcx, 3
+    rep stosq                    ; Clear the GHCB
+
+    mov        rax, 0x80000004   ; VMGEXIT AP_RESET_HOLD
+    mov        [rdx + 0x390], rax
+    mov        rax, 114          ; Set SwExitCode valid bit
+    bts        [rdx + 0x3f0], rax
+    inc        rax               ; Set SwExitInfo1 valid bit
+    bts        [rdx + 0x3f0], rax
+    inc        rax               ; Set SwExitInfo2 valid bit
+    bts        [rdx + 0x3f0], rax
+
+    pop        rdx
+    pop        rcx
+
+NoSevEsAmd64:
+    cli                          ; Disable interrupt before switching to 32-bit mode
+    mov        rax, [rsp + 40]   ; CountTofinish
+    lock dec   dword [rax]       ; (*CountTofinish)--
+
+    mov        r10, [rsp + 48]   ; Pm16CodeSegment
+    mov        rax, [rsp + 56]   ; SevEsAPJumpTable
+    mov        rbx, [rsp + 64]   ; WakeupBuffer
+    mov        rsp, r9           ; TopOfApStack
+
+    push       rax               ; Save SevEsAPJumpTable
+    push       rbx               ; Save WakeupBuffer
+    push       r10               ; Save Pm16CodeSegment
+    push       rcx               ; Save MwaitSupport
+    push       rdx               ; Save ApTargetCState
+
+    lea        rax, [PmEntryAmd64]    ; rax <- The start address of transition code
+
+    push       r8
+    push       rax
+
+    ;
+    ; Clear R8 - R15, for reset, before going into 32-bit mode
+    ;
+    xor        r8, r8
+    xor        r9, r9
+    xor        r10, r10
+    xor        r11, r11
+    xor        r12, r12
+    xor        r13, r13
+    xor        r14, r14
+    xor        r15, r15
+
+    ;
+    ; Far return into 32-bit mode
+    ;
+o64 retf
+
+BITS 32
+PmEntryAmd64:
+    mov        eax, cr0
+    btr        eax, 31           ; Clear CR0.PG
+    mov        cr0, eax          ; Disable paging and caches
+
+    mov        ecx, 0xc0000080
+    rdmsr
+    and        ah, ~ 1           ; Clear LME
+    wrmsr
+    mov        eax, cr4
+    and        al, ~ (1 << 5)    ; Clear PAE
+    mov        cr4, eax
+
+    pop        edx
+    add        esp, 4
+    pop        ecx
+    add        esp, 4
+
+MwaitCheckAmd64:
+    cmp        cl, 1              ; Check mwait-monitor support
+    jnz        HltLoopAmd64
+    mov        ebx, edx           ; Save C-State to ebx
+MwaitLoopAmd64:
+    cli
+    mov        eax, esp           ; Set Monitor Address
+    xor        ecx, ecx           ; ecx = 0
+    xor        edx, edx           ; edx = 0
+    monitor
+    mov        eax, ebx           ; Mwait Cx, Target C-State per eax[7:4]
+    shl        eax, 4
+    mwait
+    jmp        MwaitLoopAmd64
+
+HltLoopAmd64:
+    pop        edx                ; PM16CodeSegment
+    add        esp, 4
+    pop        ebx                ; WakeupBuffer
+    add        esp, 4
+    pop        eax                ; SevEsAPJumpTable
+    add        esp, 4
+    cmp        eax, 0             ; Check for SEV-ES
+    je         DoHltAmd64
+
+    cli
+    ;
+    ; SEV-ES is enabled, use VMGEXIT (GHCB information already
+    ; set by caller)
+    ;
+BITS 64
+    rep        vmmcall
+BITS 32
+
+    ;
+    ; Back from VMGEXIT AP_HLT_LOOP
+    ;   Push the FLAGS/CS/IP values to use
+    ;
+    push       word 0x0002        ; EFLAGS
+    xor        ecx, ecx
+    mov        cx, [eax + 2]      ; CS
+    push       cx
+    mov        cx, [eax]          ; IP
+    push       cx
+    push       word 0x0000        ; For alignment, will be discarded
+
+    push       edx
+    push       ebx
+
+    mov        edx, esi           ; Restore RDX reset value
+
+    retf
+
+DoHltAmd64:
+    cli
+    hlt
+    jmp        DoHltAmd64
+
+BITS 64
+AsmRelocateApLoopEndAmd64:
diff --git a/UefiCpuPkg/Library/MpInitLib/X64/MpFuncs.nasm b/UefiCpuPkg/Library/MpInitLib/X64/MpFuncs.nasm
index 5d71995bf8..cd40099ae4 100644
--- a/UefiCpuPkg/Library/MpInitLib/X64/MpFuncs.nasm
+++ b/UefiCpuPkg/Library/MpInitLib/X64/MpFuncs.nasm
@@ -459,6 +459,9 @@ ASM_PFX(AsmGetAddressMap):
     lea        rax, [AsmRelocateApLoopStart]
     mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RelocateApLoopFuncAddress], rax
     mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RelocateApLoopFuncSize], AsmRelocateApLoopEnd - AsmRelocateApLoopStart
+    lea        rax, [AsmRelocateApLoopStartAmd64]
+    mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RelocateApLoopFuncAddressAmd64], rax
+    mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.RelocateApLoopFuncSizeAmd64], AsmRelocateApLoopEndAmd64 - AsmRelocateApLoopStartAmd64
     mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.ModeTransitionOffset], Flat32Start - RendezvousFunnelProcStart
     mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.SwitchToRealNoNxOffset], SwitchToRealProcStart - Flat32Start
     mov        qword [rcx + MP_ASSEMBLY_ADDRESS_MAP.SwitchToRealPM16ModeOffset], PM16Mode - RendezvousFunnelProcStart
-- 
2.36.1.windows.1



Thread overview: 17+ messages
2023-02-07 13:49 [PATCH 0/5] Put APs in 64 bit mode before handoff to OS Yuanhao Xie
2023-02-07 13:49 ` Yuanhao Xie [this message]
2023-02-07 13:49 ` [PATCH 2/5] UefiCpuPkg: Contiguous memory allocation and code clean-up Yuanhao Xie
2023-02-07 16:40   ` [edk2-devel] " Marvin Häuser
2023-02-07 16:43     ` Marvin Häuser
2023-02-08 10:36     ` Yuanhao Xie
2023-02-08 11:09   ` Gerd Hoffmann
2023-02-10  9:12     ` Yuanhao Xie
2023-02-10 11:09       ` Gerd Hoffmann
2023-02-10 12:52         ` Ni, Ray
2023-02-07 13:49 ` [PATCH 3/5] OvmfPkg: Add CpuPageTableLib required by MpInitLib Yuanhao Xie
2023-02-08 10:53   ` Gerd Hoffmann
2023-02-09 16:30     ` [edk2-devel] " Ard Biesheuvel
2023-02-07 13:49 ` [PATCH 4/5] UefiPayloadPkg: " Yuanhao Xie
2023-02-08  5:38   ` Guo, Gua
2023-02-08 17:02   ` Guo Dong
2023-02-07 13:49 ` [PATCH 5/5] UefiCpuPkg: Put APs in 64 bit mode before handoff to OS Yuanhao Xie
