From: "Min Xu" <min.m.xu@intel.com>
To: devel@edk2.groups.io
Cc: Min M Xu <min.m.xu@intel.com>,
Brijesh Singh <brijesh.singh@amd.com>,
Erdem Aktas <erdemaktas@google.com>,
Gerd Hoffmann <kraxel@redhat.com>,
James Bottomley <jejb@linux.ibm.com>,
Jiewen Yao <jiewen.yao@intel.com>,
Tom Lendacky <thomas.lendacky@amd.com>
Subject: [PATCH V2 2/9] OvmfPkg: Implement CcExitLib
Date: Fri, 4 Nov 2022 07:19:48 +0800
Message-ID: <20221103231955.1365-3-min.m.xu@intel.com>
In-Reply-To: <20221103231955.1365-1-min.m.xu@intel.com>
From: Min M Xu <min.m.xu@intel.com>
https://bugzilla.tianocore.org/show_bug.cgi?id=4123
The base CcExitLib library provides only a default, limited interface. As it
does not provide full support, create an OVMF version of this library to
begin the process of providing full support for CC guests (such as SEV-ES
and TDX) within OVMF.
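
A typical call pattern for the GHCB helpers added here looks roughly like
the following (a minimal sketch for illustration; not lifted verbatim from
any caller in this patch):

  BOOLEAN  InterruptState;
  UINT64   Status;

  CcExitLibVmgInit (Ghcb, &InterruptState);

  Ghcb->SaveArea.Rax = Regs->Rax;
  CcExitLibVmgSetOffsetValid (Ghcb, GhcbRax);
  Ghcb->SaveArea.Rcx = Regs->Rcx;
  CcExitLibVmgSetOffsetValid (Ghcb, GhcbRcx);

  Status = CcExitLibVmgExit (Ghcb, SVM_EXIT_CPUID, 0, 0);
  if ((Status == 0) && CcExitLibVmgIsOffsetValid (Ghcb, GhcbRax)) {
    Regs->Rax = Ghcb->SaveArea.Rax;
  }

  CcExitLibVmgDone (Ghcb, InterruptState);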
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Erdem Aktas <erdemaktas@google.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: James Bottomley <jejb@linux.ibm.com>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Min Xu <min.m.xu@intel.com>
---
OvmfPkg/Library/CcExitLib/CcExitLib.c | 239 ++
OvmfPkg/Library/CcExitLib/CcExitLib.inf | 46 +
OvmfPkg/Library/CcExitLib/CcExitTd.h | 32 +
OvmfPkg/Library/CcExitLib/CcExitVcHandler.c | 2355 +++++++++++++++++
OvmfPkg/Library/CcExitLib/CcExitVcHandler.h | 53 +
OvmfPkg/Library/CcExitLib/CcExitVeHandler.c | 559 ++++
.../Library/CcExitLib/PeiDxeCcExitVcHandler.c | 103 +
OvmfPkg/Library/CcExitLib/SecCcExitLib.inf | 48 +
.../Library/CcExitLib/SecCcExitVcHandler.c | 109 +
.../Library/CcExitLib/X64/TdVmcallCpuid.nasm | 146 +
10 files changed, 3690 insertions(+)
create mode 100644 OvmfPkg/Library/CcExitLib/CcExitLib.c
create mode 100644 OvmfPkg/Library/CcExitLib/CcExitLib.inf
create mode 100644 OvmfPkg/Library/CcExitLib/CcExitTd.h
create mode 100644 OvmfPkg/Library/CcExitLib/CcExitVcHandler.c
create mode 100644 OvmfPkg/Library/CcExitLib/CcExitVcHandler.h
create mode 100644 OvmfPkg/Library/CcExitLib/CcExitVeHandler.c
create mode 100644 OvmfPkg/Library/CcExitLib/PeiDxeCcExitVcHandler.c
create mode 100644 OvmfPkg/Library/CcExitLib/SecCcExitLib.inf
create mode 100644 OvmfPkg/Library/CcExitLib/SecCcExitVcHandler.c
create mode 100644 OvmfPkg/Library/CcExitLib/X64/TdVmcallCpuid.nasm
diff --git a/OvmfPkg/Library/CcExitLib/CcExitLib.c b/OvmfPkg/Library/CcExitLib/CcExitLib.c
new file mode 100644
index 000000000000..2e9ce141f33b
--- /dev/null
+++ b/OvmfPkg/Library/CcExitLib/CcExitLib.c
@@ -0,0 +1,239 @@
+/** @file
+ CcExitLib Support Library.
+
+ Copyright (C) 2020, Advanced Micro Devices, Inc. All rights reserved.<BR>
+ Copyright (C) 2020 - 2022, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Base.h>
+#include <Uefi.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/CcExitLib.h>
+#include <Register/Amd/Msr.h>
+
+/**
+ Check for VMGEXIT error
+
+ Check if the hypervisor has returned an error after completion of the VMGEXIT
+ by examining the SwExitInfo1 field of the GHCB.
+
+ @param[in] Ghcb A pointer to the GHCB
+
+ @retval 0 VMGEXIT succeeded.
+ @return Exception number to be propagated, VMGEXIT processing
+ did not succeed.
+
+**/
+STATIC
+UINT64
+VmgExitErrorCheck (
+ IN GHCB *Ghcb
+ )
+{
+ GHCB_EVENT_INJECTION Event;
+ GHCB_EXIT_INFO ExitInfo;
+ UINT64 Status;
+
+ ExitInfo.Uint64 = Ghcb->SaveArea.SwExitInfo1;
+ ASSERT (
+ (ExitInfo.Elements.Lower32Bits == 0) ||
+ (ExitInfo.Elements.Lower32Bits == 1)
+ );
+
+ Status = 0;
+ if (ExitInfo.Elements.Lower32Bits == 0) {
+ return Status;
+ }
+
+ if (ExitInfo.Elements.Lower32Bits == 1) {
+ ASSERT (Ghcb->SaveArea.SwExitInfo2 != 0);
+
+ //
+ // Check that the return event is valid
+ //
+ Event.Uint64 = Ghcb->SaveArea.SwExitInfo2;
+ if (Event.Elements.Valid &&
+ (Event.Elements.Type == GHCB_EVENT_INJECTION_TYPE_EXCEPTION))
+ {
+ switch (Event.Elements.Vector) {
+ case GP_EXCEPTION:
+ case UD_EXCEPTION:
+ //
+ // Use returned event as return code
+ //
+ Status = Event.Uint64;
+ }
+ }
+ }
+
+ if (Status == 0) {
+ GHCB_EVENT_INJECTION GpEvent;
+
+ GpEvent.Uint64 = 0;
+ GpEvent.Elements.Vector = GP_EXCEPTION;
+ GpEvent.Elements.Type = GHCB_EVENT_INJECTION_TYPE_EXCEPTION;
+ GpEvent.Elements.Valid = 1;
+
+ Status = GpEvent.Uint64;
+ }
+
+ return Status;
+}
+
+/**
+ Perform VMGEXIT.
+
+ Sets the necessary fields of the GHCB, invokes the VMGEXIT instruction and
+ then handles the return actions.
+
+ @param[in, out] Ghcb A pointer to the GHCB
+ @param[in] ExitCode VMGEXIT code to be assigned to the SwExitCode
+ field of the GHCB.
+ @param[in] ExitInfo1 VMGEXIT information to be assigned to the
+ SwExitInfo1 field of the GHCB.
+ @param[in] ExitInfo2 VMGEXIT information to be assigned to the
+ SwExitInfo2 field of the GHCB.
+
+ @retval 0 VMGEXIT succeeded.
+ @return Exception number to be propagated, VMGEXIT
+ processing did not succeed.
+
+**/
+UINT64
+EFIAPI
+CcExitLibVmgExit (
+ IN OUT GHCB *Ghcb,
+ IN UINT64 ExitCode,
+ IN UINT64 ExitInfo1,
+ IN UINT64 ExitInfo2
+ )
+{
+ Ghcb->SaveArea.SwExitCode = ExitCode;
+ Ghcb->SaveArea.SwExitInfo1 = ExitInfo1;
+ Ghcb->SaveArea.SwExitInfo2 = ExitInfo2;
+
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbSwExitCode);
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbSwExitInfo1);
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbSwExitInfo2);
+
+ //
+ // Guest memory is used for the guest-hypervisor communication, so fence
+ // the invocation of the VMGEXIT instruction to ensure GHCB accesses are
+ // synchronized properly.
+ //
+ MemoryFence ();
+ AsmVmgExit ();
+ MemoryFence ();
+
+ return VmgExitErrorCheck (Ghcb);
+}
+
+/**
+ Perform pre-VMGEXIT initialization/preparation.
+
+ Performs the necessary steps in preparation for invoking VMGEXIT. Must be
+ called before setting any fields within the GHCB.
+
+ @param[in, out] Ghcb A pointer to the GHCB
+ @param[in, out] InterruptState A pointer to hold the current interrupt
+ state, used for restoring in CcExitLibVmgDone ()
+
+**/
+VOID
+EFIAPI
+CcExitLibVmgInit (
+ IN OUT GHCB *Ghcb,
+ IN OUT BOOLEAN *InterruptState
+ )
+{
+ //
+ // Be sure that an interrupt can't cause a #VC while the GHCB is
+ // being used.
+ //
+ *InterruptState = GetInterruptState ();
+ if (*InterruptState) {
+ DisableInterrupts ();
+ }
+
+ SetMem (&Ghcb->SaveArea, sizeof (Ghcb->SaveArea), 0);
+}
+
+/**
+ Perform post-VMGEXIT cleanup.
+
+ Performs the necessary steps to cleanup after invoking VMGEXIT. Must be
+ called after obtaining needed fields within the GHCB.
+
+ @param[in, out] Ghcb A pointer to the GHCB
+ @param[in] InterruptState An indicator to conditionally (re)enable
+ interrupts
+
+**/
+VOID
+EFIAPI
+CcExitLibVmgDone (
+ IN OUT GHCB *Ghcb,
+ IN BOOLEAN InterruptState
+ )
+{
+ if (InterruptState) {
+ EnableInterrupts ();
+ }
+}
+
+/**
+ Marks a field at the specified offset as valid in the GHCB.
+
+ The ValidBitmap area represents the areas of the GHCB that have been marked
+ valid. Set the bit in ValidBitmap for the input offset.
+
+ @param[in, out] Ghcb Pointer to the Guest-Hypervisor Communication Block
+ @param[in] Offset Qword offset in the GHCB to mark valid
+
+**/
+VOID
+EFIAPI
+CcExitLibVmgSetOffsetValid (
+ IN OUT GHCB *Ghcb,
+ IN GHCB_REGISTER Offset
+ )
+{
+ UINT32 OffsetIndex;
+ UINT32 OffsetBit;
+
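+  //
+  // Each GHCB_REGISTER value is a qword offset into the GHCB save area and
+  // the ValidBitmap holds one bit per qword, so locate the bitmap byte and
+  // the bit within that byte for this offset.
+  //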
+ OffsetIndex = Offset / 8;
+ OffsetBit = Offset % 8;
+
+ Ghcb->SaveArea.ValidBitmap[OffsetIndex] |= (1 << OffsetBit);
+}
+
+/**
+ Checks if a specified offset is valid in the GHCB.
+
+ The ValidBitmap area represents the areas of the GHCB that have been marked
+ valid. Return whether the bit in the ValidBitmap is set for the input offset.
+
+ @param[in] Ghcb A pointer to the GHCB
+ @param[in] Offset Qword offset in the GHCB to mark valid
+
+ @retval TRUE Offset is marked valid in the GHCB
+ @retval FALSE Offset is not marked valid in the GHCB
+
+**/
+BOOLEAN
+EFIAPI
+CcExitLibVmgIsOffsetValid (
+ IN GHCB *Ghcb,
+ IN GHCB_REGISTER Offset
+ )
+{
+ UINT32 OffsetIndex;
+ UINT32 OffsetBit;
+
+ OffsetIndex = Offset / 8;
+ OffsetBit = Offset % 8;
+
+ return ((Ghcb->SaveArea.ValidBitmap[OffsetIndex] & (1 << OffsetBit)) != 0);
+}
diff --git a/OvmfPkg/Library/CcExitLib/CcExitLib.inf b/OvmfPkg/Library/CcExitLib/CcExitLib.inf
new file mode 100644
index 000000000000..85906c831fbc
--- /dev/null
+++ b/OvmfPkg/Library/CcExitLib/CcExitLib.inf
@@ -0,0 +1,46 @@
+## @file
+# CcExitLib Library.
+#
+# Copyright (C) 2020, Advanced Micro Devices, Inc. All rights reserved.<BR>
+# Copyright (C) 2020 - 2022, Intel Corporation. All rights reserved.<BR>
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+##
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = CcExitLib
+ FILE_GUID = 6edfe409-72d0-4574-adcd-78c972d8fd87
+ MODULE_TYPE = BASE
+ VERSION_STRING = 1.0
+ LIBRARY_CLASS = CcExitLib|PEIM DXE_CORE DXE_DRIVER DXE_RUNTIME_DRIVER DXE_SMM_DRIVER UEFI_DRIVER
+
+#
+# The following information is for reference only and not required by the build tools.
+#
+# VALID_ARCHITECTURES = X64
+#
+
+[Sources.common]
+ CcExitLib.c
+ CcExitVcHandler.c
+ CcExitVcHandler.h
+ PeiDxeCcExitVcHandler.c
+ CcExitVeHandler.c
+ X64/TdVmcallCpuid.nasm
+
+[Packages]
+ MdePkg/MdePkg.dec
+ OvmfPkg/OvmfPkg.dec
+ UefiCpuPkg/UefiCpuPkg.dec
+
+[LibraryClasses]
+ BaseLib
+ BaseMemoryLib
+ DebugLib
+ LocalApicLib
+ MemEncryptSevLib
+
+[Pcd]
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfCpuidBase
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfCpuidSize
diff --git a/OvmfPkg/Library/CcExitLib/CcExitTd.h b/OvmfPkg/Library/CcExitLib/CcExitTd.h
new file mode 100644
index 000000000000..4afc5794d226
--- /dev/null
+++ b/OvmfPkg/Library/CcExitLib/CcExitTd.h
@@ -0,0 +1,32 @@
+/** @file
+
+ Copyright (c) 2020 - 2021, Intel Corporation. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef CC_EXIT_TD_H_
+#define CC_EXIT_TD_H_
+
+#include <Base.h>
+#include <Uefi.h>
+
+/**
+  This function enables the TD guest to request the VMM to emulate a CPUID
+  operation, especially for non-architectural CPUID leaves.
+
+ @param[in] Eax Main leaf of the CPUID
+ @param[in] Ecx Sub-leaf of the CPUID
+ @param[out] Results Returned result of CPUID operation
+
+ @return EFI_SUCCESS
+**/
+EFI_STATUS
+EFIAPI
+CcExitLibTdVmCallCpuid (
+ IN UINT64 Eax,
+ IN UINT64 Ecx,
+ OUT VOID *Results
+ );
+
+#endif
diff --git a/OvmfPkg/Library/CcExitLib/CcExitVcHandler.c b/OvmfPkg/Library/CcExitLib/CcExitVcHandler.c
new file mode 100644
index 000000000000..c4a0284d1b5e
--- /dev/null
+++ b/OvmfPkg/Library/CcExitLib/CcExitVcHandler.c
@@ -0,0 +1,2355 @@
+/** @file
+  X64 #VC Exception Handler function.
+
+ Copyright (C) 2020, Advanced Micro Devices, Inc. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Base.h>
+#include <Uefi.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/LocalApicLib.h>
+#include <Library/MemEncryptSevLib.h>
+#include <Library/CcExitLib.h>
+#include <Register/Amd/Msr.h>
+#include <Register/Intel/Cpuid.h>
+#include <IndustryStandard/InstructionParsing.h>
+
+#include "CcExitVcHandler.h"
+
+//
+// Instruction execution mode definition
+//
+typedef enum {
+ LongMode64Bit = 0,
+ LongModeCompat32Bit,
+ LongModeCompat16Bit,
+} SEV_ES_INSTRUCTION_MODE;
+
+//
+// Instruction size definition (for operand and address)
+//
+typedef enum {
+ Size8Bits = 0,
+ Size16Bits,
+ Size32Bits,
+ Size64Bits,
+} SEV_ES_INSTRUCTION_SIZE;
+
+//
+// Instruction segment definition
+//
+typedef enum {
+ SegmentEs = 0,
+ SegmentCs,
+ SegmentSs,
+ SegmentDs,
+ SegmentFs,
+ SegmentGs,
+} SEV_ES_INSTRUCTION_SEGMENT;
+
+//
+// Instruction rep function definition
+//
+typedef enum {
+ RepNone = 0,
+ RepZ,
+ RepNZ,
+} SEV_ES_INSTRUCTION_REP;
+
+typedef struct {
+ UINT8 Rm;
+ UINT8 Reg;
+ UINT8 Mod;
+} SEV_ES_INSTRUCTION_MODRM_EXT;
+
+typedef struct {
+ UINT8 Base;
+ UINT8 Index;
+ UINT8 Scale;
+} SEV_ES_INSTRUCTION_SIB_EXT;
+
+//
+// Instruction opcode definition
+//
+typedef struct {
+ SEV_ES_INSTRUCTION_MODRM_EXT ModRm;
+
+ SEV_ES_INSTRUCTION_SIB_EXT Sib;
+
+ UINTN RegData;
+ UINTN RmData;
+} SEV_ES_INSTRUCTION_OPCODE_EXT;
+
+//
+// Instruction parsing context definition
+//
+typedef struct {
+ GHCB *Ghcb;
+
+ SEV_ES_INSTRUCTION_MODE Mode;
+ SEV_ES_INSTRUCTION_SIZE DataSize;
+ SEV_ES_INSTRUCTION_SIZE AddrSize;
+ BOOLEAN SegmentSpecified;
+ SEV_ES_INSTRUCTION_SEGMENT Segment;
+ SEV_ES_INSTRUCTION_REP RepMode;
+
+ UINT8 *Begin;
+ UINT8 *End;
+
+ UINT8 *Prefixes;
+ UINT8 *OpCodes;
+ UINT8 *Displacement;
+ UINT8 *Immediate;
+
+ INSTRUCTION_REX_PREFIX RexPrefix;
+
+ BOOLEAN ModRmPresent;
+ INSTRUCTION_MODRM ModRm;
+
+ BOOLEAN SibPresent;
+ INSTRUCTION_SIB Sib;
+
+ UINTN PrefixSize;
+ UINTN OpCodeSize;
+ UINTN DisplacementSize;
+ UINTN ImmediateSize;
+
+ SEV_ES_INSTRUCTION_OPCODE_EXT Ext;
+} SEV_ES_INSTRUCTION_DATA;
+
+//
+// Non-automatic Exit function prototype
+//
+typedef
+UINT64
+(*NAE_EXIT) (
+ GHCB *Ghcb,
+ EFI_SYSTEM_CONTEXT_X64 *Regs,
+ SEV_ES_INSTRUCTION_DATA *InstructionData
+ );
+
+//
+// SEV-SNP Cpuid table entry/function
+//
+typedef PACKED struct {
+ UINT32 EaxIn;
+ UINT32 EcxIn;
+ UINT64 Unused;
+ UINT64 Unused2;
+ UINT32 Eax;
+ UINT32 Ebx;
+ UINT32 Ecx;
+ UINT32 Edx;
+ UINT64 Reserved;
+} SEV_SNP_CPUID_FUNCTION;
+
+//
+// SEV-SNP Cpuid page format
+//
+typedef PACKED struct {
+ UINT32 Count;
+ UINT32 Reserved1;
+ UINT64 Reserved2;
+ SEV_SNP_CPUID_FUNCTION function[0];
+} SEV_SNP_CPUID_INFO;
+
+/**
+ Return a pointer to the contents of the specified register.
+
+  Based upon the input register, return a pointer to the register's contents
+  in the x64 processor context.
+
+ @param[in] Regs x64 processor context
+ @param[in] Register Register to obtain pointer for
+
+ @return Pointer to the contents of the requested register
+
+**/
+STATIC
+UINT64 *
+GetRegisterPointer (
+ IN EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN UINT8 Register
+ )
+{
+ UINT64 *Reg;
+
+ switch (Register) {
+ case 0:
+ Reg = &Regs->Rax;
+ break;
+ case 1:
+ Reg = &Regs->Rcx;
+ break;
+ case 2:
+ Reg = &Regs->Rdx;
+ break;
+ case 3:
+ Reg = &Regs->Rbx;
+ break;
+ case 4:
+ Reg = &Regs->Rsp;
+ break;
+ case 5:
+ Reg = &Regs->Rbp;
+ break;
+ case 6:
+ Reg = &Regs->Rsi;
+ break;
+ case 7:
+ Reg = &Regs->Rdi;
+ break;
+ case 8:
+ Reg = &Regs->R8;
+ break;
+ case 9:
+ Reg = &Regs->R9;
+ break;
+ case 10:
+ Reg = &Regs->R10;
+ break;
+ case 11:
+ Reg = &Regs->R11;
+ break;
+ case 12:
+ Reg = &Regs->R12;
+ break;
+ case 13:
+ Reg = &Regs->R13;
+ break;
+ case 14:
+ Reg = &Regs->R14;
+ break;
+ case 15:
+ Reg = &Regs->R15;
+ break;
+ default:
+ Reg = NULL;
+ }
+
+ ASSERT (Reg != NULL);
+
+ return Reg;
+}
+
+/**
+ Update the instruction parsing context for displacement bytes.
+
+ @param[in, out] InstructionData Instruction parsing context
+ @param[in] Size The instruction displacement size
+
+**/
+STATIC
+VOID
+UpdateForDisplacement (
+ IN OUT SEV_ES_INSTRUCTION_DATA *InstructionData,
+ IN UINTN Size
+ )
+{
+ InstructionData->DisplacementSize = Size;
+ InstructionData->Immediate += Size;
+ InstructionData->End += Size;
+}
+
+/**
+  Determine if an instruction address is RIP relative.
+
+ Examine the instruction parsing context to determine if the address offset
+ is relative to the instruction pointer.
+
+ @param[in] InstructionData Instruction parsing context
+
+ @retval TRUE Instruction addressing is RIP relative
+ @retval FALSE Instruction addressing is not RIP relative
+
+**/
+STATIC
+BOOLEAN
+IsRipRelative (
+ IN SEV_ES_INSTRUCTION_DATA *InstructionData
+ )
+{
+ SEV_ES_INSTRUCTION_OPCODE_EXT *Ext;
+
+ Ext = &InstructionData->Ext;
+
+ return ((InstructionData->Mode == LongMode64Bit) &&
+ (Ext->ModRm.Mod == 0) &&
+ (Ext->ModRm.Rm == 5) &&
+ (InstructionData->SibPresent == FALSE));
+}
+
+/**
+ Return the effective address of a memory operand.
+
+ Examine the instruction parsing context to obtain the effective memory
+ address of a memory operand.
+
+ @param[in] Regs x64 processor context
+ @param[in] InstructionData Instruction parsing context
+
+ @return The memory operand effective address
+
+**/
+STATIC
+UINT64
+GetEffectiveMemoryAddress (
+ IN EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN SEV_ES_INSTRUCTION_DATA *InstructionData
+ )
+{
+ SEV_ES_INSTRUCTION_OPCODE_EXT *Ext;
+ UINT64 EffectiveAddress;
+
+ Ext = &InstructionData->Ext;
+ EffectiveAddress = 0;
+
+ if (IsRipRelative (InstructionData)) {
+ //
+ // RIP-relative displacement is a 32-bit signed value
+ //
+ INT32 RipRelative;
+
+ RipRelative = *(INT32 *)InstructionData->Displacement;
+
+ UpdateForDisplacement (InstructionData, 4);
+
+ //
+ // Negative displacement is handled by standard UINT64 wrap-around.
+ //
+ return Regs->Rip + (UINT64)RipRelative;
+ }
+
+ switch (Ext->ModRm.Mod) {
+ case 1:
+ UpdateForDisplacement (InstructionData, 1);
+ EffectiveAddress += (UINT64)(*(INT8 *)(InstructionData->Displacement));
+ break;
+ case 2:
+ switch (InstructionData->AddrSize) {
+ case Size16Bits:
+ UpdateForDisplacement (InstructionData, 2);
+ EffectiveAddress += (UINT64)(*(INT16 *)(InstructionData->Displacement));
+ break;
+ default:
+ UpdateForDisplacement (InstructionData, 4);
+ EffectiveAddress += (UINT64)(*(INT32 *)(InstructionData->Displacement));
+ break;
+ }
+
+ break;
+ }
+
+ if (InstructionData->SibPresent) {
+ INT64 Displacement;
+
+ if (Ext->Sib.Index != 4) {
+ CopyMem (
+ &Displacement,
+ GetRegisterPointer (Regs, Ext->Sib.Index),
+ sizeof (Displacement)
+ );
+ Displacement *= (INT64)(1 << Ext->Sib.Scale);
+
+ //
+ // Negative displacement is handled by standard UINT64 wrap-around.
+ //
+ EffectiveAddress += (UINT64)Displacement;
+ }
+
+ if ((Ext->Sib.Base != 5) || Ext->ModRm.Mod) {
+ EffectiveAddress += *GetRegisterPointer (Regs, Ext->Sib.Base);
+ } else {
+ UpdateForDisplacement (InstructionData, 4);
+ EffectiveAddress += (UINT64)(*(INT32 *)(InstructionData->Displacement));
+ }
+ } else {
+ EffectiveAddress += *GetRegisterPointer (Regs, Ext->ModRm.Rm);
+ }
+
+ return EffectiveAddress;
+}
+
+/**
+ Decode a ModRM byte.
+
+ Examine the instruction parsing context to decode a ModRM byte and the SIB
+ byte, if present.
+
+ @param[in] Regs x64 processor context
+ @param[in, out] InstructionData Instruction parsing context
+
+**/
+STATIC
+VOID
+DecodeModRm (
+ IN EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN OUT SEV_ES_INSTRUCTION_DATA *InstructionData
+ )
+{
+ SEV_ES_INSTRUCTION_OPCODE_EXT *Ext;
+ INSTRUCTION_REX_PREFIX *RexPrefix;
+ INSTRUCTION_MODRM *ModRm;
+ INSTRUCTION_SIB *Sib;
+
+ RexPrefix = &InstructionData->RexPrefix;
+ Ext = &InstructionData->Ext;
+ ModRm = &InstructionData->ModRm;
+ Sib = &InstructionData->Sib;
+
+ InstructionData->ModRmPresent = TRUE;
+ ModRm->Uint8 = *(InstructionData->End);
+
+ InstructionData->Displacement++;
+ InstructionData->Immediate++;
+ InstructionData->End++;
+
+ Ext->ModRm.Mod = ModRm->Bits.Mod;
+ Ext->ModRm.Reg = (RexPrefix->Bits.BitR << 3) | ModRm->Bits.Reg;
+ Ext->ModRm.Rm = (RexPrefix->Bits.BitB << 3) | ModRm->Bits.Rm;
+
+ Ext->RegData = *GetRegisterPointer (Regs, Ext->ModRm.Reg);
+
+ if (Ext->ModRm.Mod == 3) {
+ Ext->RmData = *GetRegisterPointer (Regs, Ext->ModRm.Rm);
+ } else {
+ if (ModRm->Bits.Rm == 4) {
+ InstructionData->SibPresent = TRUE;
+ Sib->Uint8 = *(InstructionData->End);
+
+ InstructionData->Displacement++;
+ InstructionData->Immediate++;
+ InstructionData->End++;
+
+ Ext->Sib.Scale = Sib->Bits.Scale;
+ Ext->Sib.Index = (RexPrefix->Bits.BitX << 3) | Sib->Bits.Index;
+ Ext->Sib.Base = (RexPrefix->Bits.BitB << 3) | Sib->Bits.Base;
+ }
+
+ Ext->RmData = GetEffectiveMemoryAddress (Regs, InstructionData);
+ }
+}
+
+/**
+ Decode instruction prefixes.
+
+ Parse the instruction data to track the instruction prefixes that have
+ been used.
+
+ @param[in] Regs x64 processor context
+ @param[in, out] InstructionData Instruction parsing context
+
+**/
+STATIC
+VOID
+DecodePrefixes (
+ IN EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN OUT SEV_ES_INSTRUCTION_DATA *InstructionData
+ )
+{
+ SEV_ES_INSTRUCTION_MODE Mode;
+ SEV_ES_INSTRUCTION_SIZE ModeDataSize;
+ SEV_ES_INSTRUCTION_SIZE ModeAddrSize;
+ UINT8 *Byte;
+
+ //
+ // Always in 64-bit mode
+ //
+ Mode = LongMode64Bit;
+ ModeDataSize = Size32Bits;
+ ModeAddrSize = Size64Bits;
+
+ InstructionData->Mode = Mode;
+ InstructionData->DataSize = ModeDataSize;
+ InstructionData->AddrSize = ModeAddrSize;
+
+ InstructionData->Prefixes = InstructionData->Begin;
+
+ Byte = InstructionData->Prefixes;
+ for ( ; ; Byte++, InstructionData->PrefixSize++) {
+ //
+ // Check the 0x40 to 0x4F range using an if statement here since some
+ // compilers don't like the "case 0x40 ... 0x4F:" syntax. This avoids
+ // 16 case statements below.
+ //
+ if ((*Byte >= REX_PREFIX_START) && (*Byte <= REX_PREFIX_STOP)) {
+ InstructionData->RexPrefix.Uint8 = *Byte;
+ if ((*Byte & REX_64BIT_OPERAND_SIZE_MASK) != 0) {
+ InstructionData->DataSize = Size64Bits;
+ }
+
+ continue;
+ }
+
+ switch (*Byte) {
+ case OVERRIDE_SEGMENT_CS:
+ case OVERRIDE_SEGMENT_DS:
+ case OVERRIDE_SEGMENT_ES:
+ case OVERRIDE_SEGMENT_SS:
+ if (Mode != LongMode64Bit) {
+ InstructionData->SegmentSpecified = TRUE;
+ InstructionData->Segment = (*Byte >> 3) & 3;
+ }
+
+ break;
+
+ case OVERRIDE_SEGMENT_FS:
+ case OVERRIDE_SEGMENT_GS:
+ InstructionData->SegmentSpecified = TRUE;
+ InstructionData->Segment = *Byte & 7;
+ break;
+
+ case OVERRIDE_OPERAND_SIZE:
+ if (InstructionData->RexPrefix.Uint8 == 0) {
+ InstructionData->DataSize =
+ (Mode == LongMode64Bit) ? Size16Bits :
+ (Mode == LongModeCompat32Bit) ? Size16Bits :
+ (Mode == LongModeCompat16Bit) ? Size32Bits : 0;
+ }
+
+ break;
+
+ case OVERRIDE_ADDRESS_SIZE:
+ InstructionData->AddrSize =
+ (Mode == LongMode64Bit) ? Size32Bits :
+ (Mode == LongModeCompat32Bit) ? Size16Bits :
+ (Mode == LongModeCompat16Bit) ? Size32Bits : 0;
+ break;
+
+ case LOCK_PREFIX:
+ break;
+
+ case REPZ_PREFIX:
+ InstructionData->RepMode = RepZ;
+ break;
+
+ case REPNZ_PREFIX:
+ InstructionData->RepMode = RepNZ;
+ break;
+
+ default:
+ InstructionData->OpCodes = Byte;
+ InstructionData->OpCodeSize = (*Byte == TWO_BYTE_OPCODE_ESCAPE) ? 2 : 1;
+
+ InstructionData->End = Byte + InstructionData->OpCodeSize;
+ InstructionData->Displacement = InstructionData->End;
+ InstructionData->Immediate = InstructionData->End;
+ return;
+ }
+ }
+}
+
+/**
+ Determine instruction length
+
+ Return the total length of the parsed instruction.
+
+ @param[in] InstructionData Instruction parsing context
+
+ @return Length of parsed instruction
+
+**/
+STATIC
+UINT64
+InstructionLength (
+ IN SEV_ES_INSTRUCTION_DATA *InstructionData
+ )
+{
+ return (UINT64)(InstructionData->End - InstructionData->Begin);
+}
+
+/**
+ Initialize the instruction parsing context.
+
+ Initialize the instruction parsing context, which includes decoding the
+ instruction prefixes.
+
+ @param[in, out] InstructionData Instruction parsing context
+ @param[in] Ghcb Pointer to the Guest-Hypervisor Communication
+ Block
+ @param[in] Regs x64 processor context
+
+**/
+STATIC
+VOID
+InitInstructionData (
+ IN OUT SEV_ES_INSTRUCTION_DATA *InstructionData,
+ IN GHCB *Ghcb,
+ IN EFI_SYSTEM_CONTEXT_X64 *Regs
+ )
+{
+ SetMem (InstructionData, sizeof (*InstructionData), 0);
+ InstructionData->Ghcb = Ghcb;
+ InstructionData->Begin = (UINT8 *)Regs->Rip;
+ InstructionData->End = (UINT8 *)Regs->Rip;
+
+ DecodePrefixes (Regs, InstructionData);
+}
+
+/**
+ Report an unsupported event to the hypervisor
+
+ Use the VMGEXIT support to report an unsupported event to the hypervisor.
+
+ @param[in] Ghcb Pointer to the Guest-Hypervisor Communication
+ Block
+ @param[in] Regs x64 processor context
+ @param[in] InstructionData Instruction parsing context
+
+ @return New exception value to propagate
+
+**/
+STATIC
+UINT64
+UnsupportedExit (
+ IN GHCB *Ghcb,
+ IN EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN SEV_ES_INSTRUCTION_DATA *InstructionData
+ )
+{
+ UINT64 Status;
+
+ Status = CcExitLibVmgExit (Ghcb, SVM_EXIT_UNSUPPORTED, Regs->ExceptionData, 0);
+ if (Status == 0) {
+ GHCB_EVENT_INJECTION Event;
+
+ Event.Uint64 = 0;
+ Event.Elements.Vector = GP_EXCEPTION;
+ Event.Elements.Type = GHCB_EVENT_INJECTION_TYPE_EXCEPTION;
+ Event.Elements.Valid = 1;
+
+ Status = Event.Uint64;
+ }
+
+ return Status;
+}
+
+/**
+ Validate that the MMIO memory access is not to encrypted memory.
+
+ Examine the pagetable entry for the memory specified. MMIO should not be
+ performed against encrypted memory. MMIO to the APIC page is always allowed.
+
+ @param[in] Ghcb Pointer to the Guest-Hypervisor Communication Block
+ @param[in] MemoryAddress Memory address to validate
+ @param[in] MemoryLength Memory length to validate
+
+ @retval 0 Memory is not encrypted
+  @return New exception value to propagate
+
+**/
+STATIC
+UINT64
+ValidateMmioMemory (
+ IN GHCB *Ghcb,
+ IN UINTN MemoryAddress,
+ IN UINTN MemoryLength
+ )
+{
+ MEM_ENCRYPT_SEV_ADDRESS_RANGE_STATE State;
+ GHCB_EVENT_INJECTION GpEvent;
+ UINTN Address;
+
+ //
+ // Allow APIC accesses (which will have the encryption bit set during
+ // SEC and PEI phases).
+ //
+ Address = MemoryAddress & ~(SIZE_4KB - 1);
+ if (Address == GetLocalApicBaseAddress ()) {
+ return 0;
+ }
+
+ State = MemEncryptSevGetAddressRangeState (
+ 0,
+ MemoryAddress,
+ MemoryLength
+ );
+ if (State == MemEncryptSevAddressRangeUnencrypted) {
+ return 0;
+ }
+
+ //
+ // Any state other than unencrypted is an error, issue a #GP.
+ //
+ DEBUG ((
+ DEBUG_ERROR,
+ "MMIO using encrypted memory: %lx\n",
+ (UINT64)MemoryAddress
+ ));
+ GpEvent.Uint64 = 0;
+ GpEvent.Elements.Vector = GP_EXCEPTION;
+ GpEvent.Elements.Type = GHCB_EVENT_INJECTION_TYPE_EXCEPTION;
+ GpEvent.Elements.Valid = 1;
+
+ return GpEvent.Uint64;
+}
+
+/**
+ Handle an MMIO event.
+
+ Use the VMGEXIT instruction to handle either an MMIO read or an MMIO write.
+
+ @param[in, out] Ghcb Pointer to the Guest-Hypervisor Communication
+ Block
+ @param[in, out] Regs x64 processor context
+ @param[in, out] InstructionData Instruction parsing context
+
+ @retval 0 Event handled successfully
+ @return New exception value to propagate
+
+**/
+STATIC
+UINT64
+MmioExit (
+ IN OUT GHCB *Ghcb,
+ IN OUT EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN OUT SEV_ES_INSTRUCTION_DATA *InstructionData
+ )
+{
+ UINT64 ExitInfo1, ExitInfo2, Status;
+ UINTN Bytes;
+ UINT64 *Register;
+ UINT8 OpCode, SignByte;
+ UINTN Address;
+
+ Bytes = 0;
+
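+  //
+  // Fetch the opcode, accounting for a possible two-byte opcode escape (0x0F).
+  //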
+ OpCode = *(InstructionData->OpCodes);
+ if (OpCode == TWO_BYTE_OPCODE_ESCAPE) {
+ OpCode = *(InstructionData->OpCodes + 1);
+ }
+
+ switch (OpCode) {
+ //
+ // MMIO write (MOV reg/memX, regX)
+ //
+ case 0x88:
+ Bytes = 1;
+ //
+ // fall through
+ //
+ case 0x89:
+ DecodeModRm (Regs, InstructionData);
+ Bytes = ((Bytes != 0) ? Bytes :
+ (InstructionData->DataSize == Size16Bits) ? 2 :
+ (InstructionData->DataSize == Size32Bits) ? 4 :
+ (InstructionData->DataSize == Size64Bits) ? 8 :
+ 0);
+
+ if (InstructionData->Ext.ModRm.Mod == 3) {
+ //
+ // NPF on two register operands???
+ //
+ return UnsupportedExit (Ghcb, Regs, InstructionData);
+ }
+
+ Status = ValidateMmioMemory (Ghcb, InstructionData->Ext.RmData, Bytes);
+ if (Status != 0) {
+ return Status;
+ }
+
+ ExitInfo1 = InstructionData->Ext.RmData;
+ ExitInfo2 = Bytes;
+ CopyMem (Ghcb->SharedBuffer, &InstructionData->Ext.RegData, Bytes);
+
+ Ghcb->SaveArea.SwScratch = (UINT64)Ghcb->SharedBuffer;
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbSwScratch);
+ Status = CcExitLibVmgExit (Ghcb, SVM_EXIT_MMIO_WRITE, ExitInfo1, ExitInfo2);
+ if (Status != 0) {
+ return Status;
+ }
+
+ break;
+
+ //
+ // MMIO write (MOV moffsetX, aX)
+ //
+ case 0xA2:
+ Bytes = 1;
+ //
+ // fall through
+ //
+ case 0xA3:
+ Bytes = ((Bytes != 0) ? Bytes :
+ (InstructionData->DataSize == Size16Bits) ? 2 :
+ (InstructionData->DataSize == Size32Bits) ? 4 :
+ (InstructionData->DataSize == Size64Bits) ? 8 :
+ 0);
+
+ InstructionData->ImmediateSize = (UINTN)(1 << InstructionData->AddrSize);
+ InstructionData->End += InstructionData->ImmediateSize;
+
+ //
+ // This code is X64 only, so a possible 8-byte copy to a UINTN is ok.
+ // Use a STATIC_ASSERT to be certain the code is being built as X64.
+ //
+ STATIC_ASSERT (
+ sizeof (UINTN) == sizeof (UINT64),
+ "sizeof (UINTN) != sizeof (UINT64), this file must be built as X64"
+ );
+
+ Address = 0;
+ CopyMem (
+ &Address,
+ InstructionData->Immediate,
+ InstructionData->ImmediateSize
+ );
+
+ Status = ValidateMmioMemory (Ghcb, Address, Bytes);
+ if (Status != 0) {
+ return Status;
+ }
+
+ ExitInfo1 = Address;
+ ExitInfo2 = Bytes;
+ CopyMem (Ghcb->SharedBuffer, &Regs->Rax, Bytes);
+
+ Ghcb->SaveArea.SwScratch = (UINT64)Ghcb->SharedBuffer;
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbSwScratch);
+ Status = CcExitLibVmgExit (Ghcb, SVM_EXIT_MMIO_WRITE, ExitInfo1, ExitInfo2);
+ if (Status != 0) {
+ return Status;
+ }
+
+ break;
+
+ //
+ // MMIO write (MOV reg/memX, immX)
+ //
+ case 0xC6:
+ Bytes = 1;
+ //
+ // fall through
+ //
+ case 0xC7:
+ DecodeModRm (Regs, InstructionData);
+ Bytes = ((Bytes != 0) ? Bytes :
+ (InstructionData->DataSize == Size16Bits) ? 2 :
+ (InstructionData->DataSize == Size32Bits) ? 4 :
+ 0);
+
+ InstructionData->ImmediateSize = Bytes;
+ InstructionData->End += Bytes;
+
+ Status = ValidateMmioMemory (Ghcb, InstructionData->Ext.RmData, Bytes);
+ if (Status != 0) {
+ return Status;
+ }
+
+ ExitInfo1 = InstructionData->Ext.RmData;
+ ExitInfo2 = Bytes;
+ CopyMem (Ghcb->SharedBuffer, InstructionData->Immediate, Bytes);
+
+ Ghcb->SaveArea.SwScratch = (UINT64)Ghcb->SharedBuffer;
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbSwScratch);
+ Status = CcExitLibVmgExit (Ghcb, SVM_EXIT_MMIO_WRITE, ExitInfo1, ExitInfo2);
+ if (Status != 0) {
+ return Status;
+ }
+
+ break;
+
+ //
+ // MMIO read (MOV regX, reg/memX)
+ //
+ case 0x8A:
+ Bytes = 1;
+ //
+ // fall through
+ //
+ case 0x8B:
+ DecodeModRm (Regs, InstructionData);
+ Bytes = ((Bytes != 0) ? Bytes :
+ (InstructionData->DataSize == Size16Bits) ? 2 :
+ (InstructionData->DataSize == Size32Bits) ? 4 :
+ (InstructionData->DataSize == Size64Bits) ? 8 :
+ 0);
+ if (InstructionData->Ext.ModRm.Mod == 3) {
+ //
+ // NPF on two register operands???
+ //
+ return UnsupportedExit (Ghcb, Regs, InstructionData);
+ }
+
+ Status = ValidateMmioMemory (Ghcb, InstructionData->Ext.RmData, Bytes);
+ if (Status != 0) {
+ return Status;
+ }
+
+ ExitInfo1 = InstructionData->Ext.RmData;
+ ExitInfo2 = Bytes;
+
+ Ghcb->SaveArea.SwScratch = (UINT64)Ghcb->SharedBuffer;
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbSwScratch);
+ Status = CcExitLibVmgExit (Ghcb, SVM_EXIT_MMIO_READ, ExitInfo1, ExitInfo2);
+ if (Status != 0) {
+ return Status;
+ }
+
+ Register = GetRegisterPointer (Regs, InstructionData->Ext.ModRm.Reg);
+ if (Bytes == 4) {
+ //
+ // Zero-extend for 32-bit operation
+ //
+ *Register = 0;
+ }
+
+ CopyMem (Register, Ghcb->SharedBuffer, Bytes);
+ break;
+
+ //
+ // MMIO read (MOV aX, moffsetX)
+ //
+ case 0xA0:
+ Bytes = 1;
+ //
+ // fall through
+ //
+ case 0xA1:
+ Bytes = ((Bytes != 0) ? Bytes :
+ (InstructionData->DataSize == Size16Bits) ? 2 :
+ (InstructionData->DataSize == Size32Bits) ? 4 :
+ (InstructionData->DataSize == Size64Bits) ? 8 :
+ 0);
+
+ InstructionData->ImmediateSize = (UINTN)(1 << InstructionData->AddrSize);
+ InstructionData->End += InstructionData->ImmediateSize;
+
+ //
+ // This code is X64 only, so a possible 8-byte copy to a UINTN is ok.
+ // Use a STATIC_ASSERT to be certain the code is being built as X64.
+ //
+ STATIC_ASSERT (
+ sizeof (UINTN) == sizeof (UINT64),
+ "sizeof (UINTN) != sizeof (UINT64), this file must be built as X64"
+ );
+
+ Address = 0;
+ CopyMem (
+ &Address,
+ InstructionData->Immediate,
+ InstructionData->ImmediateSize
+ );
+
+ Status = ValidateMmioMemory (Ghcb, Address, Bytes);
+ if (Status != 0) {
+ return Status;
+ }
+
+ ExitInfo1 = Address;
+ ExitInfo2 = Bytes;
+
+ Ghcb->SaveArea.SwScratch = (UINT64)Ghcb->SharedBuffer;
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbSwScratch);
+ Status = CcExitLibVmgExit (Ghcb, SVM_EXIT_MMIO_READ, ExitInfo1, ExitInfo2);
+ if (Status != 0) {
+ return Status;
+ }
+
+ if (Bytes == 4) {
+ //
+ // Zero-extend for 32-bit operation
+ //
+ Regs->Rax = 0;
+ }
+
+ CopyMem (&Regs->Rax, Ghcb->SharedBuffer, Bytes);
+ break;
+
+ //
+  // MMIO read w/ zero-extension (MOVZX regX, reg/memX)
+ //
+ case 0xB6:
+ Bytes = 1;
+ //
+ // fall through
+ //
+ case 0xB7:
+ DecodeModRm (Regs, InstructionData);
+ Bytes = (Bytes != 0) ? Bytes : 2;
+
+ Status = ValidateMmioMemory (Ghcb, InstructionData->Ext.RmData, Bytes);
+ if (Status != 0) {
+ return Status;
+ }
+
+ ExitInfo1 = InstructionData->Ext.RmData;
+ ExitInfo2 = Bytes;
+
+ Ghcb->SaveArea.SwScratch = (UINT64)Ghcb->SharedBuffer;
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbSwScratch);
+ Status = CcExitLibVmgExit (Ghcb, SVM_EXIT_MMIO_READ, ExitInfo1, ExitInfo2);
+ if (Status != 0) {
+ return Status;
+ }
+
+ Register = GetRegisterPointer (Regs, InstructionData->Ext.ModRm.Reg);
+ SetMem (Register, (UINTN)(1 << InstructionData->DataSize), 0);
+ CopyMem (Register, Ghcb->SharedBuffer, Bytes);
+ break;
+
+ //
+ // MMIO read w/ sign-extension (MOVSX regX, reg/memX)
+ //
+ case 0xBE:
+ Bytes = 1;
+ //
+ // fall through
+ //
+ case 0xBF:
+ DecodeModRm (Regs, InstructionData);
+ Bytes = (Bytes != 0) ? Bytes : 2;
+
+ Status = ValidateMmioMemory (Ghcb, InstructionData->Ext.RmData, Bytes);
+ if (Status != 0) {
+ return Status;
+ }
+
+ ExitInfo1 = InstructionData->Ext.RmData;
+ ExitInfo2 = Bytes;
+
+ Ghcb->SaveArea.SwScratch = (UINT64)Ghcb->SharedBuffer;
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbSwScratch);
+ Status = CcExitLibVmgExit (Ghcb, SVM_EXIT_MMIO_READ, ExitInfo1, ExitInfo2);
+ if (Status != 0) {
+ return Status;
+ }
+
+ if (Bytes == 1) {
+ UINT8 *Data;
+
+ Data = (UINT8 *)Ghcb->SharedBuffer;
+ SignByte = ((*Data & BIT7) != 0) ? 0xFF : 0x00;
+ } else {
+ UINT16 *Data;
+
+ Data = (UINT16 *)Ghcb->SharedBuffer;
+ SignByte = ((*Data & BIT15) != 0) ? 0xFF : 0x00;
+ }
+
+ Register = GetRegisterPointer (Regs, InstructionData->Ext.ModRm.Reg);
+ SetMem (Register, (UINTN)(1 << InstructionData->DataSize), SignByte);
+ CopyMem (Register, Ghcb->SharedBuffer, Bytes);
+ break;
+
+ default:
+ DEBUG ((DEBUG_ERROR, "Invalid MMIO opcode (%x)\n", OpCode));
+ Status = GP_EXCEPTION;
+ ASSERT (FALSE);
+ }
+
+ return Status;
+}
+
+/**
+ Handle a MWAIT event.
+
+ Use the VMGEXIT instruction to handle a MWAIT event.
+
+ @param[in, out] Ghcb Pointer to the Guest-Hypervisor Communication
+ Block
+ @param[in, out] Regs x64 processor context
+ @param[in] InstructionData Instruction parsing context
+
+ @retval 0 Event handled successfully
+ @return New exception value to propagate
+
+**/
+STATIC
+UINT64
+MwaitExit (
+ IN OUT GHCB *Ghcb,
+ IN OUT EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN SEV_ES_INSTRUCTION_DATA *InstructionData
+ )
+{
+ DecodeModRm (Regs, InstructionData);
+
+ Ghcb->SaveArea.Rax = Regs->Rax;
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbRax);
+ Ghcb->SaveArea.Rcx = Regs->Rcx;
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbRcx);
+
+ return CcExitLibVmgExit (Ghcb, SVM_EXIT_MWAIT, 0, 0);
+}
+
+/**
+ Handle a MONITOR event.
+
+ Use the VMGEXIT instruction to handle a MONITOR event.
+
+ @param[in, out] Ghcb Pointer to the Guest-Hypervisor Communication
+ Block
+ @param[in, out] Regs x64 processor context
+ @param[in] InstructionData Instruction parsing context
+
+ @retval 0 Event handled successfully
+ @return New exception value to propagate
+
+**/
+STATIC
+UINT64
+MonitorExit (
+ IN OUT GHCB *Ghcb,
+ IN OUT EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN SEV_ES_INSTRUCTION_DATA *InstructionData
+ )
+{
+ DecodeModRm (Regs, InstructionData);
+
+ Ghcb->SaveArea.Rax = Regs->Rax; // Identity mapped, so VA = PA
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbRax);
+ Ghcb->SaveArea.Rcx = Regs->Rcx;
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbRcx);
+ Ghcb->SaveArea.Rdx = Regs->Rdx;
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbRdx);
+
+ return CcExitLibVmgExit (Ghcb, SVM_EXIT_MONITOR, 0, 0);
+}
+
+/**
+ Handle a WBINVD event.
+
+ Use the VMGEXIT instruction to handle a WBINVD event.
+
+ @param[in, out] Ghcb Pointer to the Guest-Hypervisor Communication
+ Block
+ @param[in, out] Regs x64 processor context
+ @param[in] InstructionData Instruction parsing context
+
+ @retval 0 Event handled successfully
+ @return New exception value to propagate
+
+**/
+STATIC
+UINT64
+WbinvdExit (
+ IN OUT GHCB *Ghcb,
+ IN OUT EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN SEV_ES_INSTRUCTION_DATA *InstructionData
+ )
+{
+ return CcExitLibVmgExit (Ghcb, SVM_EXIT_WBINVD, 0, 0);
+}
+
+/**
+ Handle a RDTSCP event.
+
+ Use the VMGEXIT instruction to handle a RDTSCP event.
+
+ @param[in, out] Ghcb Pointer to the Guest-Hypervisor Communication
+ Block
+ @param[in, out] Regs x64 processor context
+ @param[in] InstructionData Instruction parsing context
+
+ @retval 0 Event handled successfully
+ @return New exception value to propagate
+
+**/
+STATIC
+UINT64
+RdtscpExit (
+ IN OUT GHCB *Ghcb,
+ IN OUT EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN SEV_ES_INSTRUCTION_DATA *InstructionData
+ )
+{
+ UINT64 Status;
+
+ DecodeModRm (Regs, InstructionData);
+
+ Status = CcExitLibVmgExit (Ghcb, SVM_EXIT_RDTSCP, 0, 0);
+ if (Status != 0) {
+ return Status;
+ }
+
+ if (!CcExitLibVmgIsOffsetValid (Ghcb, GhcbRax) ||
+ !CcExitLibVmgIsOffsetValid (Ghcb, GhcbRcx) ||
+ !CcExitLibVmgIsOffsetValid (Ghcb, GhcbRdx))
+ {
+ return UnsupportedExit (Ghcb, Regs, InstructionData);
+ }
+
+ Regs->Rax = Ghcb->SaveArea.Rax;
+ Regs->Rcx = Ghcb->SaveArea.Rcx;
+ Regs->Rdx = Ghcb->SaveArea.Rdx;
+
+ return 0;
+}
+
+/**
+ Handle a VMMCALL event.
+
+ Use the VMGEXIT instruction to handle a VMMCALL event.
+
+ @param[in, out] Ghcb Pointer to the Guest-Hypervisor Communication
+ Block
+ @param[in, out] Regs x64 processor context
+ @param[in] InstructionData Instruction parsing context
+
+ @retval 0 Event handled successfully
+ @return New exception value to propagate
+
+**/
+STATIC
+UINT64
+VmmCallExit (
+ IN OUT GHCB *Ghcb,
+ IN OUT EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN SEV_ES_INSTRUCTION_DATA *InstructionData
+ )
+{
+ UINT64 Status;
+
+ DecodeModRm (Regs, InstructionData);
+
+ Ghcb->SaveArea.Rax = Regs->Rax;
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbRax);
+ Ghcb->SaveArea.Cpl = (UINT8)(Regs->Cs & 0x3);
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbCpl);
+
+ Status = CcExitLibVmgExit (Ghcb, SVM_EXIT_VMMCALL, 0, 0);
+ if (Status != 0) {
+ return Status;
+ }
+
+ if (!CcExitLibVmgIsOffsetValid (Ghcb, GhcbRax)) {
+ return UnsupportedExit (Ghcb, Regs, InstructionData);
+ }
+
+ Regs->Rax = Ghcb->SaveArea.Rax;
+
+ return 0;
+}
+
+/**
+ Handle an MSR event.
+
+ Use the VMGEXIT instruction to handle either a RDMSR or WRMSR event.
+
+ @param[in, out] Ghcb Pointer to the Guest-Hypervisor Communication
+ Block
+ @param[in, out] Regs x64 processor context
+ @param[in] InstructionData Instruction parsing context
+
+ @retval 0 Event handled successfully
+ @return New exception value to propagate
+
+**/
+STATIC
+UINT64
+MsrExit (
+ IN OUT GHCB *Ghcb,
+ IN OUT EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN SEV_ES_INSTRUCTION_DATA *InstructionData
+ )
+{
+ UINT64 ExitInfo1, Status;
+
+ ExitInfo1 = 0;
+
+ switch (*(InstructionData->OpCodes + 1)) {
+ case 0x30: // WRMSR
+ ExitInfo1 = 1;
+ Ghcb->SaveArea.Rax = Regs->Rax;
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbRax);
+ Ghcb->SaveArea.Rdx = Regs->Rdx;
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbRdx);
+ //
+ // fall through
+ //
+ case 0x32: // RDMSR
+ Ghcb->SaveArea.Rcx = Regs->Rcx;
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbRcx);
+ break;
+ default:
+ return UnsupportedExit (Ghcb, Regs, InstructionData);
+ }
+
+ Status = CcExitLibVmgExit (Ghcb, SVM_EXIT_MSR, ExitInfo1, 0);
+ if (Status != 0) {
+ return Status;
+ }
+
+ if (ExitInfo1 == 0) {
+ if (!CcExitLibVmgIsOffsetValid (Ghcb, GhcbRax) ||
+ !CcExitLibVmgIsOffsetValid (Ghcb, GhcbRdx))
+ {
+ return UnsupportedExit (Ghcb, Regs, InstructionData);
+ }
+
+ Regs->Rax = Ghcb->SaveArea.Rax;
+ Regs->Rdx = Ghcb->SaveArea.Rdx;
+ }
+
+ return 0;
+}
+
+/**
+ Build the IOIO event information.
+
+ The IOIO event information identifies the type of IO operation to be performed
+ by the hypervisor. Build this information based on the instruction data.
+
+ @param[in] Regs x64 processor context
+ @param[in, out] InstructionData Instruction parsing context
+
+ @return IOIO event information value
+
+**/
+STATIC
+UINT64
+IoioExitInfo (
+ IN EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN OUT SEV_ES_INSTRUCTION_DATA *InstructionData
+ )
+{
+ UINT64 ExitInfo;
+
+ ExitInfo = 0;
+
+ switch (*(InstructionData->OpCodes)) {
+ //
+ // INS opcodes
+ //
+ case 0x6C:
+ case 0x6D:
+ ExitInfo |= IOIO_TYPE_INS;
+ ExitInfo |= IOIO_SEG_ES;
+ ExitInfo |= ((Regs->Rdx & 0xffff) << 16);
+ break;
+
+ //
+ // OUTS opcodes
+ //
+ case 0x6E:
+ case 0x6F:
+ ExitInfo |= IOIO_TYPE_OUTS;
+ ExitInfo |= IOIO_SEG_DS;
+ ExitInfo |= ((Regs->Rdx & 0xffff) << 16);
+ break;
+
+ //
+ // IN immediate opcodes
+ //
+ case 0xE4:
+ case 0xE5:
+ InstructionData->ImmediateSize = 1;
+ InstructionData->End++;
+ ExitInfo |= IOIO_TYPE_IN;
+ ExitInfo |= ((*(InstructionData->OpCodes + 1)) << 16);
+ break;
+
+ //
+ // OUT immediate opcodes
+ //
+ case 0xE6:
+ case 0xE7:
+ InstructionData->ImmediateSize = 1;
+ InstructionData->End++;
+ ExitInfo |= IOIO_TYPE_OUT;
+ ExitInfo |= ((*(InstructionData->OpCodes + 1)) << 16) | IOIO_TYPE_OUT;
+ break;
+
+ //
+ // IN register opcodes
+ //
+ case 0xEC:
+ case 0xED:
+ ExitInfo |= IOIO_TYPE_IN;
+ ExitInfo |= ((Regs->Rdx & 0xffff) << 16);
+ break;
+
+ //
+ // OUT register opcodes
+ //
+ case 0xEE:
+ case 0xEF:
+ ExitInfo |= IOIO_TYPE_OUT;
+ ExitInfo |= ((Regs->Rdx & 0xffff) << 16);
+ break;
+
+ default:
+ return 0;
+ }
+
+ switch (*(InstructionData->OpCodes)) {
+ //
+ // Single-byte opcodes
+ //
+ case 0x6C:
+ case 0x6E:
+ case 0xE4:
+ case 0xE6:
+ case 0xEC:
+ case 0xEE:
+ ExitInfo |= IOIO_DATA_8;
+ break;
+
+ //
+ // Length determined by instruction parsing
+ //
+ default:
+ ExitInfo |= (InstructionData->DataSize == Size16Bits) ? IOIO_DATA_16
+ : IOIO_DATA_32;
+ }
+
+ switch (InstructionData->AddrSize) {
+ case Size16Bits:
+ ExitInfo |= IOIO_ADDR_16;
+ break;
+
+ case Size32Bits:
+ ExitInfo |= IOIO_ADDR_32;
+ break;
+
+ case Size64Bits:
+ ExitInfo |= IOIO_ADDR_64;
+ break;
+
+ default:
+ break;
+ }
+
+ if (InstructionData->RepMode != 0) {
+ ExitInfo |= IOIO_REP;
+ }
+
+ return ExitInfo;
+}
+
+/**
+ Handle an IOIO event.
+
+ Use the VMGEXIT instruction to handle an IOIO event.
+
+ @param[in, out] Ghcb Pointer to the Guest-Hypervisor Communication
+ Block
+ @param[in, out] Regs x64 processor context
+ @param[in] InstructionData Instruction parsing context
+
+ @retval 0 Event handled successfully
+ @return New exception value to propagate
+
+**/
+STATIC
+UINT64
+IoioExit (
+ IN OUT GHCB *Ghcb,
+ IN OUT EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN SEV_ES_INSTRUCTION_DATA *InstructionData
+ )
+{
+ UINT64 ExitInfo1, ExitInfo2, Status;
+ BOOLEAN IsString;
+
+ ExitInfo1 = IoioExitInfo (Regs, InstructionData);
+ if (ExitInfo1 == 0) {
+ return UnsupportedExit (Ghcb, Regs, InstructionData);
+ }
+
+ IsString = ((ExitInfo1 & IOIO_TYPE_STR) != 0) ? TRUE : FALSE;
+ if (IsString) {
+ UINTN IoBytes, VmgExitBytes;
+ UINTN GhcbCount, OpCount;
+
+ Status = 0;
+
+ IoBytes = IOIO_DATA_BYTES (ExitInfo1);
+ GhcbCount = sizeof (Ghcb->SharedBuffer) / IoBytes;
+
+ OpCount = ((ExitInfo1 & IOIO_REP) != 0) ? Regs->Rcx : 1;
+ while (OpCount != 0) {
+ ExitInfo2 = MIN (OpCount, GhcbCount);
+ VmgExitBytes = ExitInfo2 * IoBytes;
+
+ if ((ExitInfo1 & IOIO_TYPE_IN) == 0) {
+ CopyMem (Ghcb->SharedBuffer, (VOID *)Regs->Rsi, VmgExitBytes);
+ Regs->Rsi += VmgExitBytes;
+ }
+
+ Ghcb->SaveArea.SwScratch = (UINT64)Ghcb->SharedBuffer;
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbSwScratch);
+ Status = CcExitLibVmgExit (Ghcb, SVM_EXIT_IOIO_PROT, ExitInfo1, ExitInfo2);
+ if (Status != 0) {
+ return Status;
+ }
+
+ if ((ExitInfo1 & IOIO_TYPE_IN) != 0) {
+ CopyMem ((VOID *)Regs->Rdi, Ghcb->SharedBuffer, VmgExitBytes);
+ Regs->Rdi += VmgExitBytes;
+ }
+
+ if ((ExitInfo1 & IOIO_REP) != 0) {
+ Regs->Rcx -= ExitInfo2;
+ }
+
+ OpCount -= ExitInfo2;
+ }
+ } else {
+ if ((ExitInfo1 & IOIO_TYPE_IN) != 0) {
+ Ghcb->SaveArea.Rax = 0;
+ } else {
+ CopyMem (&Ghcb->SaveArea.Rax, &Regs->Rax, IOIO_DATA_BYTES (ExitInfo1));
+ }
+
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbRax);
+
+ Status = CcExitLibVmgExit (Ghcb, SVM_EXIT_IOIO_PROT, ExitInfo1, 0);
+ if (Status != 0) {
+ return Status;
+ }
+
+ if ((ExitInfo1 & IOIO_TYPE_IN) != 0) {
+ if (!CcExitLibVmgIsOffsetValid (Ghcb, GhcbRax)) {
+ return UnsupportedExit (Ghcb, Regs, InstructionData);
+ }
+
+ CopyMem (&Regs->Rax, &Ghcb->SaveArea.Rax, IOIO_DATA_BYTES (ExitInfo1));
+ }
+ }
+
+ return 0;
+}
+
+/**
+  Handle an INVD event.
+
+  Use the VMGEXIT instruction to handle an INVD event.
+
+ @param[in, out] Ghcb Pointer to the Guest-Hypervisor Communication
+ Block
+ @param[in, out] Regs x64 processor context
+ @param[in] InstructionData Instruction parsing context
+
+ @retval 0 Event handled successfully
+ @return New exception value to propagate
+
+**/
+STATIC
+UINT64
+InvdExit (
+ IN OUT GHCB *Ghcb,
+ IN OUT EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN SEV_ES_INSTRUCTION_DATA *InstructionData
+ )
+{
+ return CcExitLibVmgExit (Ghcb, SVM_EXIT_INVD, 0, 0);
+}
+
+/**
+ Fetch CPUID leaf/function via hypervisor/VMGEXIT.
+
+ @param[in, out] Ghcb Pointer to the Guest-Hypervisor Communication
+ Block
+ @param[in] EaxIn EAX input for cpuid instruction
+ @param[in] EcxIn ECX input for cpuid instruction
+ @param[in] Xcr0In XCR0 at time of cpuid instruction
+ @param[in, out] Eax Pointer to store leaf's EAX value
+ @param[in, out] Ebx Pointer to store leaf's EBX value
+ @param[in, out] Ecx Pointer to store leaf's ECX value
+ @param[in, out] Edx Pointer to store leaf's EDX value
+ @param[in, out] Status Pointer to store status from VMGEXIT (always 0
+ unless return value indicates failure)
+ @param[in, out] Unsupported Pointer to store indication of unsupported
+ VMGEXIT (always false unless return value
+ indicates failure)
+
+  @retval TRUE             CPUID leaf fetched successfully.
+  @retval FALSE            Error occurred while fetching CPUID leaf. Callers
+                           should check Status and Unsupported and handle
+                           accordingly if they indicate a more precise
+                           error condition.
+
+**/
+STATIC
+BOOLEAN
+GetCpuidHyp (
+ IN OUT GHCB *Ghcb,
+ IN UINT32 EaxIn,
+ IN UINT32 EcxIn,
+ IN UINT64 XCr0,
+ IN OUT UINT32 *Eax,
+ IN OUT UINT32 *Ebx,
+ IN OUT UINT32 *Ecx,
+ IN OUT UINT32 *Edx,
+ IN OUT UINT64 *Status,
+ IN OUT BOOLEAN *UnsupportedExit
+ )
+{
+ *UnsupportedExit = FALSE;
+ Ghcb->SaveArea.Rax = EaxIn;
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbRax);
+ Ghcb->SaveArea.Rcx = EcxIn;
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbRcx);
+ if (EaxIn == CPUID_EXTENDED_STATE) {
+ Ghcb->SaveArea.XCr0 = XCr0;
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbXCr0);
+ }
+
+ *Status = CcExitLibVmgExit (Ghcb, SVM_EXIT_CPUID, 0, 0);
+ if (*Status != 0) {
+ return FALSE;
+ }
+
+ if (!CcExitLibVmgIsOffsetValid (Ghcb, GhcbRax) ||
+ !CcExitLibVmgIsOffsetValid (Ghcb, GhcbRbx) ||
+ !CcExitLibVmgIsOffsetValid (Ghcb, GhcbRcx) ||
+ !CcExitLibVmgIsOffsetValid (Ghcb, GhcbRdx))
+ {
+ *UnsupportedExit = TRUE;
+ return FALSE;
+ }
+
+ if (Eax) {
+ *Eax = (UINT32)(UINTN)Ghcb->SaveArea.Rax;
+ }
+
+ if (Ebx) {
+ *Ebx = (UINT32)(UINTN)Ghcb->SaveArea.Rbx;
+ }
+
+ if (Ecx) {
+ *Ecx = (UINT32)(UINTN)Ghcb->SaveArea.Rcx;
+ }
+
+ if (Edx) {
+ *Edx = (UINT32)(UINTN)Ghcb->SaveArea.Rdx;
+ }
+
+ return TRUE;
+}
+
+/**
+  Check if SEV-SNP is enabled.
+
+ @retval TRUE SEV-SNP is enabled.
+ @retval FALSE SEV-SNP is disabled.
+
+**/
+STATIC
+BOOLEAN
+SnpEnabled (
+ VOID
+ )
+{
+ MSR_SEV_STATUS_REGISTER Msr;
+
+ Msr.Uint32 = AsmReadMsr32 (MSR_SEV_STATUS);
+
+ return !!Msr.Bits.SevSnpBit;
+}
+
+/**
+ Calculate the total XSAVE area size for enabled XSAVE areas
+
+ @param[in] XFeaturesEnabled Bit-mask of enabled XSAVE features/areas as
+ indicated by XCR0/MSR_IA32_XSS bits
+ @param[in] XSaveBaseSize Base/legacy XSAVE area size (e.g. when
+ XCR0 is 1)
+ @param[in, out] XSaveSize Pointer to storage for calculated XSAVE area
+ size
+ @param[in] Compacted Whether or not the calculation is for the
+ normal XSAVE area size (leaf 0xD,0x0,EBX) or
+ compacted XSAVE area size (leaf 0xD,0x1,EBX)
+
+
+ @retval TRUE XSAVE size calculation was successful.
+ @retval FALSE XSAVE size calculation was unsuccessful.
+**/
+STATIC
+BOOLEAN
+GetCpuidXSaveSize (
+ IN UINT64 XFeaturesEnabled,
+ IN UINT32 XSaveBaseSize,
+ IN OUT UINT32 *XSaveSize,
+ IN BOOLEAN Compacted
+ )
+{
+ SEV_SNP_CPUID_INFO *CpuidInfo;
+ UINT64 XFeaturesFound = 0;
+ UINT32 Idx;
+
+ *XSaveSize = XSaveBaseSize;
+ CpuidInfo = (SEV_SNP_CPUID_INFO *)(UINT64)PcdGet32 (PcdOvmfCpuidBase);
+
+ for (Idx = 0; Idx < CpuidInfo->Count; Idx++) {
+ SEV_SNP_CPUID_FUNCTION *CpuidFn = &CpuidInfo->function[Idx];
+
+ if (!((CpuidFn->EaxIn == 0xD) &&
+ ((CpuidFn->EcxIn == 0) || (CpuidFn->EcxIn == 1))))
+ {
+ continue;
+ }
+
+ if (XFeaturesFound & (1ULL << CpuidFn->EcxIn) ||
+ !(XFeaturesEnabled & (1ULL << CpuidFn->EcxIn)))
+ {
+ continue;
+ }
+
+ XFeaturesFound |= (1ULL << CpuidFn->EcxIn);
+ if (Compacted) {
+ *XSaveSize += CpuidFn->Eax;
+ } else {
+ *XSaveSize = MAX (*XSaveSize, CpuidFn->Eax + CpuidFn->Ebx);
+ }
+ }
+
+ /*
+ * Either the guest set unsupported XCR0/XSS bits, or the corresponding
+ * entries in the CPUID table were not present. This is an invalid state.
+ */
+ if (XFeaturesFound != (XFeaturesEnabled & ~3UL)) {
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+/**
+ Check if a CPUID leaf/function is indexed via ECX sub-leaf/sub-function
+
+ @param[in] EaxIn EAX input for cpuid instruction
+
+ @retval FALSE cpuid leaf/function is not indexed by ECX input
+ @retval TRUE cpuid leaf/function is indexed by ECX input
+
+**/
+STATIC
+BOOLEAN
+IsFunctionIndexed (
+ IN UINT32 EaxIn
+ )
+{
+ switch (EaxIn) {
+ case CPUID_CACHE_PARAMS:
+ case CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS:
+ case CPUID_EXTENDED_TOPOLOGY:
+ case CPUID_EXTENDED_STATE:
+ case CPUID_INTEL_RDT_MONITORING:
+ case CPUID_INTEL_RDT_ALLOCATION:
+ case CPUID_INTEL_SGX:
+ case CPUID_INTEL_PROCESSOR_TRACE:
+ case CPUID_DETERMINISTIC_ADDRESS_TRANSLATION_PARAMETERS:
+ case CPUID_V2_EXTENDED_TOPOLOGY:
+ case 0x8000001D: /* Cache Topology Information */
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/**
+ Fetch CPUID leaf/function via SEV-SNP CPUID table.
+
+ @param[in, out] Ghcb Pointer to the Guest-Hypervisor Communication
+ Block
+ @param[in] EaxIn EAX input for cpuid instruction
+ @param[in] EcxIn ECX input for cpuid instruction
+ @param[in] Xcr0In XCR0 at time of cpuid instruction
+ @param[in, out] Eax Pointer to store leaf's EAX value
+ @param[in, out] Ebx Pointer to store leaf's EBX value
+ @param[in, out] Ecx Pointer to store leaf's ECX value
+ @param[in, out] Edx Pointer to store leaf's EDX value
+ @param[in, out] Status Pointer to store status from VMGEXIT (always 0
+ unless return value indicates failure)
+ @param[in, out] Unsupported Pointer to store indication of unsupported
+ VMGEXIT (always false unless return value
+ indicates failure)
+
+  @retval TRUE             CPUID leaf fetched successfully.
+  @retval FALSE            Error occurred while fetching CPUID leaf. Callers
+                           should check Status and Unsupported and handle
+                           accordingly if they indicate a more precise
+                           error condition.
+
+**/
+STATIC
+BOOLEAN
+GetCpuidFw (
+ IN OUT GHCB *Ghcb,
+ IN UINT32 EaxIn,
+ IN UINT32 EcxIn,
+ IN UINT64 XCr0,
+ IN OUT UINT32 *Eax,
+ IN OUT UINT32 *Ebx,
+ IN OUT UINT32 *Ecx,
+ IN OUT UINT32 *Edx,
+ IN OUT UINT64 *Status,
+ IN OUT BOOLEAN *Unsupported
+ )
+{
+ SEV_SNP_CPUID_INFO *CpuidInfo;
+ BOOLEAN Found;
+ UINT32 Idx;
+
+ CpuidInfo = (SEV_SNP_CPUID_INFO *)(UINT64)PcdGet32 (PcdOvmfCpuidBase);
+ Found = FALSE;
+
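+  //
+  // Search the SEV-SNP CPUID page for the requested leaf; run-time dependent
+  // values (APIC IDs, OSXSAVE/OSPKE bits, XSAVE sizes) are fixed up below.
+  //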
+ for (Idx = 0; Idx < CpuidInfo->Count; Idx++) {
+ SEV_SNP_CPUID_FUNCTION *CpuidFn = &CpuidInfo->function[Idx];
+
+ if (CpuidFn->EaxIn != EaxIn) {
+ continue;
+ }
+
+ if (IsFunctionIndexed (CpuidFn->EaxIn) && (CpuidFn->EcxIn != EcxIn)) {
+ continue;
+ }
+
+ *Eax = CpuidFn->Eax;
+ *Ebx = CpuidFn->Ebx;
+ *Ecx = CpuidFn->Ecx;
+ *Edx = CpuidFn->Edx;
+
+ Found = TRUE;
+ break;
+ }
+
+ if (!Found) {
+ *Eax = *Ebx = *Ecx = *Edx = 0;
+ goto Out;
+ }
+
+ if (EaxIn == CPUID_VERSION_INFO) {
+ IA32_CR4 Cr4;
+ UINT32 Ebx2;
+ UINT32 Edx2;
+
+ if (!GetCpuidHyp (
+ Ghcb,
+ EaxIn,
+ EcxIn,
+ XCr0,
+ NULL,
+ &Ebx2,
+ NULL,
+ &Edx2,
+ Status,
+ Unsupported
+ ))
+ {
+ return FALSE;
+ }
+
+ /* initial APIC ID */
+ *Ebx = (*Ebx & 0x00FFFFFF) | (Ebx2 & 0xFF000000);
+ /* APIC enabled bit */
+ *Edx = (*Edx & ~BIT9) | (Edx2 & BIT9);
+ /* OSXSAVE enabled bit */
+ Cr4.UintN = AsmReadCr4 ();
+ *Ecx = (Cr4.Bits.OSXSAVE) ? (*Ecx & ~BIT27) | (*Ecx & BIT27)
+ : (*Ecx & ~BIT27);
+ } else if (EaxIn == CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS) {
+ IA32_CR4 Cr4;
+
+ Cr4.UintN = AsmReadCr4 ();
+ /* OSPKE enabled bit */
+ *Ecx = (Cr4.Bits.PKE) ? (*Ecx | BIT4) : (*Ecx & ~BIT4);
+ } else if (EaxIn == CPUID_EXTENDED_TOPOLOGY) {
+ if (!GetCpuidHyp (
+ Ghcb,
+ EaxIn,
+ EcxIn,
+ XCr0,
+ NULL,
+ NULL,
+ NULL,
+ Edx,
+ Status,
+ Unsupported
+ ))
+ {
+ return FALSE;
+ }
+ } else if ((EaxIn == CPUID_EXTENDED_STATE) && ((EcxIn == 0) || (EcxIn == 1))) {
+ MSR_IA32_XSS_REGISTER XssMsr;
+ BOOLEAN Compacted;
+ UINT32 XSaveSize;
+
+ XssMsr.Uint64 = 0;
+ Compacted = FALSE;
+ if (EcxIn == 1) {
+ /*
+ * The PPR and APM aren't clear on what size should be encoded in
+ * 0xD:0x1:EBX when compaction is not enabled by either XSAVEC or
+ * XSAVES, as these are generally fixed to 1 on real CPUs. Report
+ * this undefined case as an error.
+ */
+ if (!(*Eax & (BIT3 | BIT1))) {
+ /* (XSAVES | XSAVEC) */
+ return FALSE;
+ }
+
+ Compacted = TRUE;
+ XssMsr.Uint64 = AsmReadMsr64 (MSR_IA32_XSS);
+ }
+
+ if (!GetCpuidXSaveSize (
+ XCr0 | XssMsr.Uint64,
+ *Ebx,
+ &XSaveSize,
+ Compacted
+ ))
+ {
+ return FALSE;
+ }
+
+ *Ebx = XSaveSize;
+ } else if (EaxIn == 0x8000001E) {
+ UINT32 Ebx2;
+ UINT32 Ecx2;
+
+ /* extended APIC ID */
+ if (!GetCpuidHyp (
+ Ghcb,
+ EaxIn,
+ EcxIn,
+ XCr0,
+ Eax,
+ &Ebx2,
+ &Ecx2,
+ NULL,
+ Status,
+ Unsupported
+ ))
+ {
+ return FALSE;
+ }
+
+ /* compute ID */
+ *Ebx = (*Ebx & 0xFFFFFF00) | (Ebx2 & 0x000000FF);
+ /* node ID */
+ *Ecx = (*Ecx & 0xFFFFFF00) | (Ecx2 & 0x000000FF);
+ }
+
+Out:
+ *Status = 0;
+ *Unsupported = FALSE;
+ return TRUE;
+}
+
+/**
+ Handle a CPUID event.
+
+  Use the VMGEXIT instruction or the CPUID table to handle a CPUID event.
+
+ @param[in, out] Ghcb Pointer to the Guest-Hypervisor Communication
+ Block
+ @param[in, out] Regs x64 processor context
+ @param[in] InstructionData Instruction parsing context
+
+ @retval 0 Event handled successfully
+ @return New exception value to propagate
+
+**/
+STATIC
+UINT64
+CpuidExit (
+ IN OUT GHCB *Ghcb,
+ IN OUT EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN SEV_ES_INSTRUCTION_DATA *InstructionData
+ )
+{
+ BOOLEAN Unsupported;
+ UINT64 Status;
+ UINT32 EaxIn;
+ UINT32 EcxIn;
+ UINT64 XCr0;
+ UINT32 Eax;
+ UINT32 Ebx;
+ UINT32 Ecx;
+ UINT32 Edx;
+
+ EaxIn = (UINT32)(UINTN)Regs->Rax;
+ EcxIn = (UINT32)(UINTN)Regs->Rcx;
+
+ if (EaxIn == CPUID_EXTENDED_STATE) {
+ IA32_CR4 Cr4;
+
+ Cr4.UintN = AsmReadCr4 ();
+ Ghcb->SaveArea.XCr0 = (Cr4.Bits.OSXSAVE == 1) ? AsmXGetBv (0) : 1;
+ XCr0 = (Cr4.Bits.OSXSAVE == 1) ? AsmXGetBv (0) : 1;
+ }
+
+ if (SnpEnabled ()) {
+ if (!GetCpuidFw (
+ Ghcb,
+ EaxIn,
+ EcxIn,
+ XCr0,
+ &Eax,
+ &Ebx,
+ &Ecx,
+ &Edx,
+ &Status,
+ &Unsupported
+ ))
+ {
+ goto CpuidFail;
+ }
+ } else {
+ if (!GetCpuidHyp (
+ Ghcb,
+ EaxIn,
+ EcxIn,
+ XCr0,
+ &Eax,
+ &Ebx,
+ &Ecx,
+ &Edx,
+ &Status,
+ &Unsupported
+ ))
+ {
+ goto CpuidFail;
+ }
+ }
+
+ Regs->Rax = Eax;
+ Regs->Rbx = Ebx;
+ Regs->Rcx = Ecx;
+ Regs->Rdx = Edx;
+
+ return 0;
+
+CpuidFail:
+ if (Unsupported) {
+ return UnsupportedExit (Ghcb, Regs, InstructionData);
+ }
+
+ return Status;
+}
+
+/**
+ Handle a RDPMC event.
+
+ Use the VMGEXIT instruction to handle a RDPMC event.
+
+ @param[in, out] Ghcb Pointer to the Guest-Hypervisor Communication
+ Block
+ @param[in, out] Regs x64 processor context
+ @param[in] InstructionData Instruction parsing context
+
+ @retval 0 Event handled successfully
+ @return New exception value to propagate
+
+**/
+STATIC
+UINT64
+RdpmcExit (
+ IN OUT GHCB *Ghcb,
+ IN OUT EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN SEV_ES_INSTRUCTION_DATA *InstructionData
+ )
+{
+ UINT64 Status;
+
+ Ghcb->SaveArea.Rcx = Regs->Rcx;
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbRcx);
+
+ Status = CcExitLibVmgExit (Ghcb, SVM_EXIT_RDPMC, 0, 0);
+ if (Status != 0) {
+ return Status;
+ }
+
+ if (!CcExitLibVmgIsOffsetValid (Ghcb, GhcbRax) ||
+ !CcExitLibVmgIsOffsetValid (Ghcb, GhcbRdx))
+ {
+ return UnsupportedExit (Ghcb, Regs, InstructionData);
+ }
+
+ Regs->Rax = Ghcb->SaveArea.Rax;
+ Regs->Rdx = Ghcb->SaveArea.Rdx;
+
+ return 0;
+}
+
+/**
+ Handle a RDTSC event.
+
+ Use the VMGEXIT instruction to handle a RDTSC event.
+
+ @param[in, out] Ghcb Pointer to the Guest-Hypervisor Communication
+ Block
+ @param[in, out] Regs x64 processor context
+ @param[in] InstructionData Instruction parsing context
+
+ @retval 0 Event handled successfully
+ @return New exception value to propagate
+
+**/
+STATIC
+UINT64
+RdtscExit (
+ IN OUT GHCB *Ghcb,
+ IN OUT EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN SEV_ES_INSTRUCTION_DATA *InstructionData
+ )
+{
+ UINT64 Status;
+
+ Status = CcExitLibVmgExit (Ghcb, SVM_EXIT_RDTSC, 0, 0);
+ if (Status != 0) {
+ return Status;
+ }
+
+ if (!CcExitLibVmgIsOffsetValid (Ghcb, GhcbRax) ||
+ !CcExitLibVmgIsOffsetValid (Ghcb, GhcbRdx))
+ {
+ return UnsupportedExit (Ghcb, Regs, InstructionData);
+ }
+
+ Regs->Rax = Ghcb->SaveArea.Rax;
+ Regs->Rdx = Ghcb->SaveArea.Rdx;
+
+ return 0;
+}
+
+/**
+ Handle a DR7 register write event.
+
+ Use the VMGEXIT instruction to handle a DR7 write event.
+
+ @param[in, out] Ghcb Pointer to the Guest-Hypervisor Communication
+ Block
+ @param[in, out] Regs x64 processor context
+ @param[in] InstructionData Instruction parsing context
+
+ @retval 0 Event handled successfully
+ @return New exception value to propagate
+
+**/
+STATIC
+UINT64
+Dr7WriteExit (
+ IN OUT GHCB *Ghcb,
+ IN OUT EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN SEV_ES_INSTRUCTION_DATA *InstructionData
+ )
+{
+ SEV_ES_INSTRUCTION_OPCODE_EXT *Ext;
+ SEV_ES_PER_CPU_DATA *SevEsData;
+ UINT64 *Register;
+ UINT64 Status;
+
+ Ext = &InstructionData->Ext;
+ SevEsData = (SEV_ES_PER_CPU_DATA *)(Ghcb + 1);
+
+ DecodeModRm (Regs, InstructionData);
+
+ //
+ // MOV to/from a debug register is always decoded as if MOD == 3,
+ // regardless of the encoding
+ //
+ Register = GetRegisterPointer (Regs, Ext->ModRm.Rm);
+
+ //
+ // Using a value of 0 for ExitInfo1 means RAX holds the value
+ //
+ Ghcb->SaveArea.Rax = *Register;
+ CcExitLibVmgSetOffsetValid (Ghcb, GhcbRax);
+
+ Status = CcExitLibVmgExit (Ghcb, SVM_EXIT_DR7_WRITE, 0, 0);
+ if (Status != 0) {
+ return Status;
+ }
+
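+ //
+ // Cache the written value so later DR7 reads can be satisfied locally
+ // without another VMGEXIT.
+ //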
+ SevEsData->Dr7 = *Register;
+ SevEsData->Dr7Cached = 1;
+
+ return 0;
+}
+
+/**
+ Handle a DR7 register read event.
+
+ Use the VMGEXIT instruction to handle a DR7 read event.
+
+ @param[in, out] Ghcb Pointer to the Guest-Hypervisor Communication
+ Block
+ @param[in, out] Regs x64 processor context
+ @param[in] InstructionData Instruction parsing context
+
+ @retval 0 Event handled successfully
+
+**/
+STATIC
+UINT64
+Dr7ReadExit (
+ IN OUT GHCB *Ghcb,
+ IN OUT EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN SEV_ES_INSTRUCTION_DATA *InstructionData
+ )
+{
+ SEV_ES_INSTRUCTION_OPCODE_EXT *Ext;
+ SEV_ES_PER_CPU_DATA *SevEsData;
+ UINT64 *Register;
+
+ Ext = &InstructionData->Ext;
+ SevEsData = (SEV_ES_PER_CPU_DATA *)(Ghcb + 1);
+
+ DecodeModRm (Regs, InstructionData);
+
+ //
+ // MOV to/from a debug register is always decoded as if MOD == 3,
+ // regardless of the encoding
+ //
+ Register = GetRegisterPointer (Regs, Ext->ModRm.Rm);
+
+ //
+ // If there is a cached value for DR7, return that. Otherwise return the
+ // DR7 standard reset value of 0x400 (no debug breakpoints set).
+ //
+ *Register = (SevEsData->Dr7Cached == 1) ? SevEsData->Dr7 : 0x400;
+
+ return 0;
+}
+
+/**
+ Handle a #VC exception.
+
+ Performs the necessary processing to handle a #VC exception.
+
+ @param[in, out] Ghcb Pointer to the GHCB
+ @param[in, out] ExceptionType Pointer to an EFI_EXCEPTION_TYPE to be set
+ as value to use on error.
+ @param[in, out] SystemContext Pointer to EFI_SYSTEM_CONTEXT
+
+ @retval EFI_SUCCESS Exception handled
+ @retval EFI_UNSUPPORTED #VC not supported, (new) exception value to
+ propagate provided
+ @retval EFI_PROTOCOL_ERROR #VC handling failed, (new) exception value to
+ propagate provided
+
+**/
+EFI_STATUS
+EFIAPI
+CcExitLibInternalVmgExitHandleVc (
+ IN OUT GHCB *Ghcb,
+ IN OUT EFI_EXCEPTION_TYPE *ExceptionType,
+ IN OUT EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ EFI_SYSTEM_CONTEXT_X64 *Regs;
+ NAE_EXIT NaeExit;
+ SEV_ES_INSTRUCTION_DATA InstructionData;
+ UINT64 ExitCode, Status;
+ EFI_STATUS VcRet;
+ BOOLEAN InterruptState;
+
+ VcRet = EFI_SUCCESS;
+
+ Regs = SystemContext.SystemContextX64;
+
+ CcExitLibVmgInit (Ghcb, &InterruptState);
+
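+ //
+ // For a #VC exception the error code holds the SVM exit code of the
+ // intercepted operation; dispatch to the matching NAE event handler.
+ //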
+ ExitCode = Regs->ExceptionData;
+ switch (ExitCode) {
+ case SVM_EXIT_DR7_READ:
+ NaeExit = Dr7ReadExit;
+ break;
+
+ case SVM_EXIT_DR7_WRITE:
+ NaeExit = Dr7WriteExit;
+ break;
+
+ case SVM_EXIT_RDTSC:
+ NaeExit = RdtscExit;
+ break;
+
+ case SVM_EXIT_RDPMC:
+ NaeExit = RdpmcExit;
+ break;
+
+ case SVM_EXIT_CPUID:
+ NaeExit = CpuidExit;
+ break;
+
+ case SVM_EXIT_INVD:
+ NaeExit = InvdExit;
+ break;
+
+ case SVM_EXIT_IOIO_PROT:
+ NaeExit = IoioExit;
+ break;
+
+ case SVM_EXIT_MSR:
+ NaeExit = MsrExit;
+ break;
+
+ case SVM_EXIT_VMMCALL:
+ NaeExit = VmmCallExit;
+ break;
+
+ case SVM_EXIT_RDTSCP:
+ NaeExit = RdtscpExit;
+ break;
+
+ case SVM_EXIT_WBINVD:
+ NaeExit = WbinvdExit;
+ break;
+
+ case SVM_EXIT_MONITOR:
+ NaeExit = MonitorExit;
+ break;
+
+ case SVM_EXIT_MWAIT:
+ NaeExit = MwaitExit;
+ break;
+
+ case SVM_EXIT_NPF:
+ NaeExit = MmioExit;
+ break;
+
+ default:
+ NaeExit = UnsupportedExit;
+ }
+
+ InitInstructionData (&InstructionData, Ghcb, Regs);
+
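+ //
+ // A zero status means the event was handled; a non-zero status is a
+ // GHCB event-injection value describing the exception to propagate.
+ //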
+ Status = NaeExit (Ghcb, Regs, &InstructionData);
+ if (Status == 0) {
+ Regs->Rip += InstructionLength (&InstructionData);
+ } else {
+ GHCB_EVENT_INJECTION Event;
+
+ Event.Uint64 = Status;
+ if (Event.Elements.ErrorCodeValid != 0) {
+ Regs->ExceptionData = Event.Elements.ErrorCode;
+ } else {
+ Regs->ExceptionData = 0;
+ }
+
+ *ExceptionType = Event.Elements.Vector;
+
+ VcRet = EFI_PROTOCOL_ERROR;
+ }
+
+ CcExitLibVmgDone (Ghcb, InterruptState);
+
+ return VcRet;
+}
+
+/**
+ Routine to allow ASSERT from within #VC.
+
+ @param[in, out] SevEsData Pointer to the per-CPU data
+
+**/
+VOID
+EFIAPI
+CcExitLibVmgExitIssueAssert (
+ IN OUT SEV_ES_PER_CPU_DATA *SevEsData
+ )
+{
+ //
+ // Progress will be halted, so reset VcCount so that the ASSERT output
+ // (which itself triggers further #VC exceptions) can be seen.
+ //
+ SevEsData->VcCount = 0;
+
+ ASSERT (FALSE);
+ CpuDeadLoop ();
+}
diff --git a/OvmfPkg/Library/CcExitLib/CcExitVcHandler.h b/OvmfPkg/Library/CcExitLib/CcExitVcHandler.h
new file mode 100644
index 000000000000..d8084130fae4
--- /dev/null
+++ b/OvmfPkg/Library/CcExitLib/CcExitVcHandler.h
@@ -0,0 +1,53 @@
+/** @file
+ X64 #VC Exception Handler function header file.
+
+ Copyright (C) 2020, Advanced Micro Devices, Inc. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef CC_EXIT_VC_HANDLER_H_
+#define CC_EXIT_VC_HANDLER_H_
+
+#include <Base.h>
+#include <Uefi.h>
+#include <Library/CcExitLib.h>
+
+/**
+ Handle a #VC exception.
+
+ Performs the necessary processing to handle a #VC exception.
+
+ @param[in, out] Ghcb Pointer to the GHCB
+ @param[in, out] ExceptionType Pointer to an EFI_EXCEPTION_TYPE to be set
+ as value to use on error.
+ @param[in, out] SystemContext Pointer to EFI_SYSTEM_CONTEXT
+
+ @retval EFI_SUCCESS Exception handled
+ @retval EFI_UNSUPPORTED #VC not supported, (new) exception value to
+ propagate provided
+ @retval EFI_PROTOCOL_ERROR #VC handling failed, (new) exception value to
+ propagate provided
+
+**/
+EFI_STATUS
+EFIAPI
+CcExitLibInternalVmgExitHandleVc (
+ IN OUT GHCB *Ghcb,
+ IN OUT EFI_EXCEPTION_TYPE *ExceptionType,
+ IN OUT EFI_SYSTEM_CONTEXT SystemContext
+ );
+
+/**
+ Routine to allow ASSERT from within #VC.
+
+ @param[in, out] SevEsData Pointer to the per-CPU data
+
+**/
+VOID
+EFIAPI
+CcExitLibVmgExitIssueAssert (
+ IN OUT SEV_ES_PER_CPU_DATA *SevEsData
+ );
+
+#endif
diff --git a/OvmfPkg/Library/CcExitLib/CcExitVeHandler.c b/OvmfPkg/Library/CcExitLib/CcExitVeHandler.c
new file mode 100644
index 000000000000..28bb0af82205
--- /dev/null
+++ b/OvmfPkg/Library/CcExitLib/CcExitVeHandler.c
@@ -0,0 +1,559 @@
+/** @file
+
+ Copyright (c) 2021, Intel Corporation. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Library/BaseLib.h>
+#include <Library/DebugLib.h>
+#include "CcExitTd.h"
+#include <Library/CcExitLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <IndustryStandard/Tdx.h>
+#include <IndustryStandard/InstructionParsing.h>
+
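+//
+// EAX/EDX pair used by RDMSR/WRMSR, viewed as a single 64-bit MSR value.
+//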
+typedef union {
+ struct {
+ UINT32 Eax;
+ UINT32 Edx;
+ } Regs;
+ UINT64 Val;
+} MSR_DATA;
+
+typedef union {
+ UINT8 Val;
+ struct {
+ UINT8 B : 1;
+ UINT8 X : 1;
+ UINT8 R : 1;
+ UINT8 W : 1;
+ } Bits;
+} REX;
+
+typedef union {
+ UINT8 Val;
+ struct {
+ UINT8 Rm : 3;
+ UINT8 Reg : 3;
+ UINT8 Mod : 2;
+ } Bits;
+} MODRM;
+
+typedef struct {
+ UINT64 Regs[4];
+} CPUID_DATA;
+
+/**
+ Handle a CPUID event.
+
+ Use the TDVMCALL instruction to handle a CPUID #VE event.
+
+ @param[in, out] Regs x64 processor context
+ @param[in] Veinfo VE Info
+
+ @retval 0 Event handled successfully
+ @return New exception value to propagate
+**/
+STATIC
+UINT64
+EFIAPI
+CpuIdExit (
+ IN EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN TDCALL_VEINFO_RETURN_DATA *Veinfo
+ )
+{
+ CPUID_DATA CpuIdData;
+ UINT64 Status;
+
+ Status = CcExitLibTdVmCallCpuid (Regs->Rax, Regs->Rcx, &CpuIdData);
+
+ if (Status == 0) {
+ Regs->Rax = CpuIdData.Regs[0];
+ Regs->Rbx = CpuIdData.Regs[1];
+ Regs->Rcx = CpuIdData.Regs[2];
+ Regs->Rdx = CpuIdData.Regs[3];
+ }
+
+ return Status;
+}
+
+/**
+ Handle an IO event.
+
+ Use the TDVMCALL instruction to handle either an IO read or an IO write.
+
+ @param[in, out] Regs x64 processor context
+ @param[in] Veinfo VE Info
+
+ @retval 0 Event handled successfully
+ @return New exception value to propagate
+**/
+STATIC
+UINT64
+EFIAPI
+IoExit (
+ IN OUT EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN TDCALL_VEINFO_RETURN_DATA *Veinfo
+ )
+{
+ BOOLEAN Write;
+ UINTN Size;
+ UINTN Port;
+ UINT64 Val;
+ UINT64 RepCnt;
+ UINT64 Status;
+
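+ //
+ // Direction == 0 is an OUT (write to the port); Direction == 1 is an IN.
+ //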
+ Val = 0;
+ Write = Veinfo->ExitQualification.Io.Direction ? FALSE : TRUE;
+ Size = Veinfo->ExitQualification.Io.Size + 1;
+ Port = Veinfo->ExitQualification.Io.Port;
+
+ if (Veinfo->ExitQualification.Io.String) {
+ //
+ // If REP is set, get rep-cnt from Rcx
+ //
+ RepCnt = Veinfo->ExitQualification.Io.Rep ? Regs->Rcx : 1;
+
+ while (RepCnt) {
+ Val = 0;
+ if (Write == TRUE) {
+ CopyMem (&Val, (VOID *)Regs->Rsi, Size);
+ Regs->Rsi += Size;
+ }
+
+ Status = TdVmCall (EXIT_REASON_IO_INSTRUCTION, Size, Write, Port, Val, (Write ? NULL : &Val));
+ if (Status != 0) {
+ break;
+ }
+
+ if (Write == FALSE) {
+ CopyMem ((VOID *)Regs->Rdi, &Val, Size);
+ Regs->Rdi += Size;
+ }
+
+ if (Veinfo->ExitQualification.Io.Rep) {
+ Regs->Rcx -= 1;
+ }
+
+ RepCnt -= 1;
+ }
+ } else {
+ if (Write == TRUE) {
+ CopyMem (&Val, (VOID *)&Regs->Rax, Size);
+ }
+
+ Status = TdVmCall (EXIT_REASON_IO_INSTRUCTION, Size, Write, Port, Val, (Write ? NULL : &Val));
+ if ((Status == 0) && (Write == FALSE)) {
+ CopyMem ((VOID *)&Regs->Rax, &Val, Size);
+ }
+ }
+
+ return Status;
+}
+
+/**
+ Handle an MSR read event.
+
+ Use the TDVMCALL instruction to handle an MSR read.
+
+ @param[in, out] Regs x64 processor context
+ @param[in] Veinfo VE Info
+
+ @retval 0 Event handled successfully
+ @return New exception value to propagate
+**/
+STATIC
+UINT64
+ReadMsrExit (
+ IN OUT EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN TDCALL_VEINFO_RETURN_DATA *Veinfo
+ )
+{
+ MSR_DATA Data;
+ UINT64 Status;
+
+ Status = TdVmCall (EXIT_REASON_MSR_READ, Regs->Rcx, 0, 0, 0, &Data);
+ if (Status == 0) {
+ Regs->Rax = Data.Regs.Eax;
+ Regs->Rdx = Data.Regs.Edx;
+ }
+
+ return Status;
+}
+
+/**
+ Handle an MSR write event.
+
+ Use the TDVMCALL instruction to handle an MSR write.
+
+ @param[in, out] Regs x64 processor context
+ @param[in] Veinfo VE Info
+
+ @retval 0 Event handled successfully
+ @return New exception value to propagate
+**/
+STATIC
+UINT64
+WriteMsrExit (
+ IN OUT EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN TDCALL_VEINFO_RETURN_DATA *Veinfo
+ )
+{
+ UINT64 Status;
+ MSR_DATA Data;
+
+ Data.Regs.Eax = (UINT32)Regs->Rax;
+ Data.Regs.Edx = (UINT32)Regs->Rdx;
+
+ Status = TdVmCall (EXIT_REASON_MSR_WRITE, Regs->Rcx, Data.Val, 0, 0, NULL);
+
+ return Status;
+}
+
+STATIC
+VOID
+EFIAPI
+TdxDecodeInstruction (
+ IN UINT8 *Rip
+ )
+{
+ UINTN i;
+
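+ //
+ // Dump up to the architectural maximum instruction length (15 bytes).
+ //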
+ DEBUG ((DEBUG_INFO, "TDX: #TD[EPT] instruction (%p):", Rip));
+ for (i = 0; i < 15; i++) {
+ DEBUG ((DEBUG_INFO, "%02x:", Rip[i]));
+ }
+
+ DEBUG ((DEBUG_INFO, "\n"));
+}
+
+#define TDX_DECODER_BUG_ON(x) \
+ if ((x)) { \
+ TdxDecodeInstruction(Rip); \
+ TdVmCall(TDVMCALL_HALT, 0, 0, 0, 0, 0); \
+ }
+
+STATIC
+UINT64 *
+EFIAPI
+GetRegFromContext (
+ IN EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN UINTN RegIndex
+ )
+{
+ switch (RegIndex) {
+ case 0: return &Regs->Rax;
+ case 1: return &Regs->Rcx;
+ case 2: return &Regs->Rdx;
+ case 3: return &Regs->Rbx;
+ case 4: return &Regs->Rsp;
+ case 5: return &Regs->Rbp;
+ case 6: return &Regs->Rsi;
+ case 7: return &Regs->Rdi;
+ case 8: return &Regs->R8;
+ case 9: return &Regs->R9;
+ case 10: return &Regs->R10;
+ case 11: return &Regs->R11;
+ case 12: return &Regs->R12;
+ case 13: return &Regs->R13;
+ case 14: return &Regs->R14;
+ case 15: return &Regs->R15;
+ }
+
+ return NULL;
+}
+
+/**
+ Handle an MMIO event.
+
+ Use the TDVMCALL instruction to handle either an MMIO read or an MMIO write.
+
+ @param[in, out] Regs x64 processor context
+ @param[in] Veinfo VE Info
+
+ @retval 0 Event handled successfully
+ @return New exception value to propagate
+**/
+STATIC
+INTN
+EFIAPI
+MmioExit (
+ IN OUT EFI_SYSTEM_CONTEXT_X64 *Regs,
+ IN TDCALL_VEINFO_RETURN_DATA *Veinfo
+ )
+{
+ UINT64 Status;
+ UINT32 MmioSize;
+ UINT32 RegSize;
+ UINT8 OpCode;
+ BOOLEAN SeenRex;
+ UINT64 *Reg;
+ UINT8 *Rip;
+ UINT64 Val;
+ UINT32 OpSize;
+ MODRM ModRm;
+ REX Rex;
+
+ Rip = (UINT8 *)Regs->Rip;
+ Val = 0;
+ Rex.Val = 0;
+ SeenRex = FALSE;
+
+ //
+ // Default to 32bit transfer
+ //
+ OpSize = 4;
+
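+ //
+ // Skip legacy prefixes: 0x66 selects a 16-bit operand, 0x64/0x65/0x67
+ // are segment/address-size overrides that can be ignored here, and
+ // 0x40-0x4F is a REX prefix.
+ //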
+ do {
+ OpCode = *Rip++;
+ if (OpCode == 0x66) {
+ OpSize = 2;
+ } else if ((OpCode == 0x64) || (OpCode == 0x65) || (OpCode == 0x67)) {
+ continue;
+ } else if ((OpCode >= 0x40) && (OpCode <= 0x4f)) {
+ SeenRex = TRUE;
+ Rex.Val = OpCode;
+ } else {
+ break;
+ }
+ } while (TRUE);
+
+ //
+ // We need to have at least 2 more bytes for this instruction
+ //
+ TDX_DECODER_BUG_ON (((UINT64)Rip - Regs->Rip) > 13);
+
+ OpCode = *Rip++;
+ //
+ // Two-byte opcode; get the next byte
+ //
+ if (OpCode == 0x0F) {
+ OpCode = *Rip++;
+ }
+
+ switch (OpCode) {
+ case 0x88:
+ case 0x8A:
+ case 0xB6:
+ MmioSize = 1;
+ break;
+ case 0xB7:
+ MmioSize = 2;
+ break;
+ default:
+ MmioSize = Rex.Bits.W ? 8 : OpSize;
+ break;
+ }
+
+ /* Punt on AH/BH/CH/DH unless such an encoding actually shows up. */
+ ModRm.Val = *Rip++;
+ TDX_DECODER_BUG_ON (MmioSize == 1 && ModRm.Bits.Reg > 4 && !SeenRex && OpCode != 0xB6);
+ Reg = GetRegFromContext (Regs, ModRm.Bits.Reg | ((int)Rex.Bits.R << 3));
+ TDX_DECODER_BUG_ON (!Reg);
+
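+ //
+ // Skip the SIB byte and any displacement; any immediate operand is
+ // consumed by the opcode handling below.
+ //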
+ if (ModRm.Bits.Rm == 4) {
+ ++Rip; /* SIB byte */
+ }
+
+ if ((ModRm.Bits.Mod == 2) || ((ModRm.Bits.Mod == 0) && (ModRm.Bits.Rm == 5))) {
+ Rip += 4; /* DISP32 */
+ } else if (ModRm.Bits.Mod == 1) {
+ ++Rip; /* DISP8 */
+ }
+
+ switch (OpCode) {
+ case 0x88:
+ case 0x89:
+ CopyMem ((void *)&Val, Reg, MmioSize);
+ Status = TdVmCall (TDVMCALL_MMIO, MmioSize, 1, Veinfo->GuestPA, Val, 0);
+ break;
+ case 0xC7:
+ CopyMem ((void *)&Val, Rip, OpSize);
+ Status = TdVmCall (TDVMCALL_MMIO, MmioSize, 1, Veinfo->GuestPA, Val, 0);
+ Rip += OpSize;
+ break;
+ default:
+ //
+ // Writes to a 32-bit register are zero-extended into the full 64-bit
+ // register. Hence 'MOVZX r[32/64], r/m16' is hardcoded to a register
+ // size of 8, and the straight MOV case also uses a register size of 8
+ // for a 32-bit read.
+ //
+ switch (OpCode) {
+ case 0xB6:
+ RegSize = Rex.Bits.W ? 8 : OpSize;
+ break;
+ case 0xB7:
+ RegSize = 8;
+ break;
+ default:
+ RegSize = MmioSize == 4 ? 8 : MmioSize;
+ break;
+ }
+
+ Status = TdVmCall (TDVMCALL_MMIO, MmioSize, 0, Veinfo->GuestPA, 0, &Val);
+ if (Status == 0) {
+ ZeroMem (Reg, RegSize);
+ CopyMem (Reg, (void *)&Val, MmioSize);
+ }
+ }
+
+ if (Status == 0) {
+ TDX_DECODER_BUG_ON (((UINT64)Rip - Regs->Rip) > 15);
+
+ //
+ // Update the instruction length to reflect the decoded size so the
+ // caller can advance RIP past the emulated instruction.
+ //
+ Veinfo->ExitInstructionLength = (UINT32)((UINT64)Rip - Regs->Rip);
+ }
+
+ return Status;
+}
+
+/**
+ Handle a #VE exception.
+
+ Performs the necessary processing to handle a #VE exception.
+
+ @param[in, out] ExceptionType Pointer to an EFI_EXCEPTION_TYPE to be set
+ as value to use on error.
+ @param[in, out] SystemContext Pointer to EFI_SYSTEM_CONTEXT
+
+ @retval EFI_SUCCESS Exception handled
+ @retval EFI_UNSUPPORTED #VE not supported, (new) exception value to
+ propagate provided
+ @retval EFI_PROTOCOL_ERROR #VE handling failed, (new) exception value to
+ propagate provided
+
+**/
+EFI_STATUS
+EFIAPI
+CcExitHandleVe (
+ IN OUT EFI_EXCEPTION_TYPE *ExceptionType,
+ IN OUT EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ UINT64 Status;
+ TD_RETURN_DATA ReturnData;
+ EFI_SYSTEM_CONTEXT_X64 *Regs;
+
+ Regs = SystemContext.SystemContextX64;
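+
+ //
+ // Retrieve the #VE information from the TDX module; the exception cannot
+ // be handled without it.
+ //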
+ Status = TdCall (TDCALL_TDGETVEINFO, 0, 0, 0, &ReturnData);
+ ASSERT (Status == 0);
+ if (Status != 0) {
+ DEBUG ((DEBUG_ERROR, "#VE happened. TDGETVEINFO failed with Status = 0x%llx\n", Status));
+ TdVmCall (TDVMCALL_HALT, 0, 0, 0, 0, 0);
+ }
+
+ switch (ReturnData.VeInfo.ExitReason) {
+ case EXIT_REASON_CPUID:
+ Status = CpuIdExit (Regs, &ReturnData.VeInfo);
+ DEBUG ((
+ DEBUG_VERBOSE,
+ "CPUID #VE happened, ExitReasion is %d, ExitQualification = 0x%x.\n",
+ ReturnData.VeInfo.ExitReason,
+ ReturnData.VeInfo.ExitQualification.Val
+ ));
+ break;
+
+ case EXIT_REASON_HLT:
+ Status = TdVmCall (EXIT_REASON_HLT, 0, 0, 0, 0, 0);
+ break;
+
+ case EXIT_REASON_IO_INSTRUCTION:
+ Status = IoExit (Regs, &ReturnData.VeInfo);
+ DEBUG ((
+ DEBUG_VERBOSE,
+ "IO_Instruction #VE happened, ExitReasion is %d, ExitQualification = 0x%x.\n",
+ ReturnData.VeInfo.ExitReason,
+ ReturnData.VeInfo.ExitQualification.Val
+ ));
+ break;
+
+ case EXIT_REASON_MSR_READ:
+ Status = ReadMsrExit (Regs, &ReturnData.VeInfo);
+ DEBUG ((
+ DEBUG_VERBOSE,
+ "RDMSR #VE happened, ExitReasion is %d, ExitQualification = 0x%x. Regs->Rcx=0x%llx, Status = 0x%llx\n",
+ ReturnData.VeInfo.ExitReason,
+ ReturnData.VeInfo.ExitQualification.Val,
+ Regs->Rcx,
+ Status
+ ));
+ break;
+
+ case EXIT_REASON_MSR_WRITE:
+ Status = WriteMsrExit (Regs, &ReturnData.VeInfo);
+ DEBUG ((
+ DEBUG_VERBOSE,
+ "WRMSR #VE happened, ExitReasion is %d, ExitQualification = 0x%x. Regs->Rcx=0x%llx, Status = 0x%llx\n",
+ ReturnData.VeInfo.ExitReason,
+ ReturnData.VeInfo.ExitQualification.Val,
+ Regs->Rcx,
+ Status
+ ));
+ break;
+
+ case EXIT_REASON_EPT_VIOLATION:
+ Status = MmioExit (Regs, &ReturnData.VeInfo);
+ DEBUG ((
+ DEBUG_VERBOSE,
+ "MMIO #VE happened, ExitReasion is %d, ExitQualification = 0x%x.\n",
+ ReturnData.VeInfo.ExitReason,
+ ReturnData.VeInfo.ExitQualification.Val
+ ));
+ break;
+
+ case EXIT_REASON_VMCALL:
+ case EXIT_REASON_MWAIT_INSTRUCTION:
+ case EXIT_REASON_MONITOR_INSTRUCTION:
+ case EXIT_REASON_WBINVD:
+ case EXIT_REASON_RDPMC:
+ /* Handle as nops. */
+ break;
+
+ default:
+ DEBUG ((
+ DEBUG_ERROR,
+ "Unsupported #VE happened, ExitReason is %d, ExitQualification = 0x%x.\n",
+ ReturnData.VeInfo.ExitReason,
+ ReturnData.VeInfo.ExitQualification.Val
+ ));
+
+ ASSERT (FALSE);
+ CpuDeadLoop ();
+ }
+
+ if (Status) {
+ DEBUG ((
+ DEBUG_ERROR,
+ "#VE Error (0x%llx) returned from host, ExitReason is %d, ExitQualification = 0x%x.\n",
+ Status,
+ ReturnData.VeInfo.ExitReason,
+ ReturnData.VeInfo.ExitQualification.Val
+ ));
+
+ TdVmCall (TDVMCALL_HALT, 0, 0, 0, 0, 0);
+ }
+
+ SystemContext.SystemContextX64->Rip += ReturnData.VeInfo.ExitInstructionLength;
+ return EFI_SUCCESS;
+}
diff --git a/OvmfPkg/Library/CcExitLib/PeiDxeCcExitVcHandler.c b/OvmfPkg/Library/CcExitLib/PeiDxeCcExitVcHandler.c
new file mode 100644
index 000000000000..d8f78040c2e8
--- /dev/null
+++ b/OvmfPkg/Library/CcExitLib/PeiDxeCcExitVcHandler.c
@@ -0,0 +1,103 @@
+/** @file
+ X64 #VC Exception Handler function.
+
+ Copyright (C) 2020, Advanced Micro Devices, Inc. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Base.h>
+#include <Uefi.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/MemEncryptSevLib.h>
+#include <Library/CcExitLib.h>
+#include <Register/Amd/Msr.h>
+
+#include "CcExitVcHandler.h"
+
+/**
+ Handle a #VC exception.
+
+ Performs the necessary processing to handle a #VC exception.
+
+ @param[in, out] ExceptionType Pointer to an EFI_EXCEPTION_TYPE to be set
+ as value to use on error.
+ @param[in, out] SystemContext Pointer to EFI_SYSTEM_CONTEXT
+
+ @retval EFI_SUCCESS Exception handled
+ @retval EFI_UNSUPPORTED #VC not supported, (new) exception value to
+ propagate provided
+ @retval EFI_PROTOCOL_ERROR #VC handling failed, (new) exception value to
+ propagate provided
+
+**/
+EFI_STATUS
+EFIAPI
+CcExitHandleVc (
+ IN OUT EFI_EXCEPTION_TYPE *ExceptionType,
+ IN OUT EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ MSR_SEV_ES_GHCB_REGISTER Msr;
+ GHCB *Ghcb;
+ GHCB *GhcbBackup;
+ EFI_STATUS VcRet;
+ BOOLEAN InterruptState;
+ SEV_ES_PER_CPU_DATA *SevEsData;
+
+ InterruptState = GetInterruptState ();
+ if (InterruptState) {
+ DisableInterrupts ();
+ }
+
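+ //
+ // The GHCB MSR holds the guest physical address of this vCPU's GHCB page.
+ //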
+ Msr.GhcbPhysicalAddress = AsmReadMsr64 (MSR_SEV_ES_GHCB);
+ ASSERT (Msr.GhcbInfo.Function == 0);
+ ASSERT (Msr.Ghcb != 0);
+
+ Ghcb = Msr.Ghcb;
+ GhcbBackup = NULL;
+
+ SevEsData = (SEV_ES_PER_CPU_DATA *)(Ghcb + 1);
+ SevEsData->VcCount++;
+
+ //
+ // Check for maximum PEI/DXE #VC nesting.
+ //
+ if (SevEsData->VcCount > VMGEXIT_MAXIMUM_VC_COUNT) {
+ CcExitLibVmgExitIssueAssert (SevEsData);
+ } else if (SevEsData->VcCount > 1) {
+ //
+ // Nested #VC
+ //
+ if (SevEsData->GhcbBackupPages == NULL) {
+ CcExitLibVmgExitIssueAssert (SevEsData);
+ }
+
+ //
+ // Save the active GHCB to a backup page.
+ // To access the correct backup page, increment the backup page pointer
+ // based on the current VcCount.
+ //
+ GhcbBackup = (GHCB *)SevEsData->GhcbBackupPages;
+ GhcbBackup += (SevEsData->VcCount - 2);
+
+ CopyMem (GhcbBackup, Ghcb, sizeof (*Ghcb));
+ }
+
+ VcRet = CcExitLibInternalVmgExitHandleVc (Ghcb, ExceptionType, SystemContext);
+
+ if (GhcbBackup != NULL) {
+ //
+ // Restore the active GHCB from the backup page.
+ //
+ CopyMem (Ghcb, GhcbBackup, sizeof (*Ghcb));
+ }
+
+ SevEsData->VcCount--;
+
+ if (InterruptState) {
+ EnableInterrupts ();
+ }
+
+ return VcRet;
+}
diff --git a/OvmfPkg/Library/CcExitLib/SecCcExitLib.inf b/OvmfPkg/Library/CcExitLib/SecCcExitLib.inf
new file mode 100644
index 000000000000..c4425eed5fef
--- /dev/null
+++ b/OvmfPkg/Library/CcExitLib/SecCcExitLib.inf
@@ -0,0 +1,48 @@
+## @file
+# CcExitLib Support Library.
+#
+# Copyright (C) 2020, Advanced Micro Devices, Inc. All rights reserved.<BR>
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+##
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = SecCcExitLib
+ FILE_GUID = 325cb20c-90e2-42a1-8667-56752d0b149c
+ MODULE_TYPE = BASE
+ VERSION_STRING = 1.0
+ LIBRARY_CLASS = CcExitLib|SEC
+
+#
+# The following information is for reference only and not required by the build tools.
+#
+# VALID_ARCHITECTURES = X64
+#
+
+[Sources.common]
+ CcExitLib.c
+ CcExitVcHandler.c
+ CcExitVcHandler.h
+ SecCcExitVcHandler.c
+ CcExitVeHandler.c
+ X64/TdVmcallCpuid.nasm
+
+[Packages]
+ MdePkg/MdePkg.dec
+ OvmfPkg/OvmfPkg.dec
+ UefiCpuPkg/UefiCpuPkg.dec
+
+[LibraryClasses]
+ BaseLib
+ BaseMemoryLib
+ DebugLib
+ LocalApicLib
+ MemEncryptSevLib
+ PcdLib
+
+[FixedPcd]
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbBackupBase
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfSecGhcbBackupSize
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfCpuidBase
+ gUefiOvmfPkgTokenSpaceGuid.PcdOvmfCpuidSize
diff --git a/OvmfPkg/Library/CcExitLib/SecCcExitVcHandler.c b/OvmfPkg/Library/CcExitLib/SecCcExitVcHandler.c
new file mode 100644
index 000000000000..4a748ed9dec8
--- /dev/null
+++ b/OvmfPkg/Library/CcExitLib/SecCcExitVcHandler.c
@@ -0,0 +1,109 @@
+/** @file
+ X64 #VC Exception Handler function.
+
+ Copyright (C) 2020, Advanced Micro Devices, Inc. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Base.h>
+#include <Uefi.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/MemEncryptSevLib.h>
+#include <Library/CcExitLib.h>
+#include <Register/Amd/Msr.h>
+
+#include "CcExitVcHandler.h"
+
+/**
+ Handle a #VC exception.
+
+ Performs the necessary processing to handle a #VC exception.
+
+ @param[in, out] ExceptionType Pointer to an EFI_EXCEPTION_TYPE to be set
+ as value to use on error.
+ @param[in, out] SystemContext Pointer to EFI_SYSTEM_CONTEXT
+
+ @retval EFI_SUCCESS Exception handled
+ @retval EFI_UNSUPPORTED #VC not supported, (new) exception value to
+ propagate provided
+ @retval EFI_PROTOCOL_ERROR #VC handling failed, (new) exception value to
+ propagate provided
+
+**/
+EFI_STATUS
+EFIAPI
+CcExitHandleVc (
+ IN OUT EFI_EXCEPTION_TYPE *ExceptionType,
+ IN OUT EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ MSR_SEV_ES_GHCB_REGISTER Msr;
+ GHCB *Ghcb;
+ GHCB *GhcbBackup;
+ EFI_STATUS VcRet;
+ BOOLEAN InterruptState;
+ SEV_ES_PER_CPU_DATA *SevEsData;
+
+ InterruptState = GetInterruptState ();
+ if (InterruptState) {
+ DisableInterrupts ();
+ }
+
+ Msr.GhcbPhysicalAddress = AsmReadMsr64 (MSR_SEV_ES_GHCB);
+ ASSERT (Msr.GhcbInfo.Function == 0);
+ ASSERT (Msr.Ghcb != 0);
+
+ Ghcb = Msr.Ghcb;
+ GhcbBackup = NULL;
+
+ SevEsData = (SEV_ES_PER_CPU_DATA *)(Ghcb + 1);
+ SevEsData->VcCount++;
+
+ //
+ // Check for maximum SEC #VC nesting.
+ //
+ if (SevEsData->VcCount > VMGEXIT_MAXIMUM_VC_COUNT) {
+ CcExitLibVmgExitIssueAssert (SevEsData);
+ } else if (SevEsData->VcCount > 1) {
+ UINTN GhcbBackupSize;
+
+ //
+ // Be sure that the proper number of backup pages has been allocated
+ //
+ GhcbBackupSize = (VMGEXIT_MAXIMUM_VC_COUNT - 1) * sizeof (*Ghcb);
+ if (GhcbBackupSize > FixedPcdGet32 (PcdOvmfSecGhcbBackupSize)) {
+ //
+ // Not enough SEC backup pages allocated.
+ //
+ CcExitLibVmgExitIssueAssert (SevEsData);
+ }
+
+ //
+ // Save the active GHCB to a backup page.
+ // To access the correct backup page, increment the backup page pointer
+ // based on the current VcCount.
+ //
+ GhcbBackup = (GHCB *)FixedPcdGet32 (PcdOvmfSecGhcbBackupBase);
+ GhcbBackup += (SevEsData->VcCount - 2);
+
+ CopyMem (GhcbBackup, Ghcb, sizeof (*Ghcb));
+ }
+
+ VcRet = CcExitLibInternalVmgExitHandleVc (Ghcb, ExceptionType, SystemContext);
+
+ if (GhcbBackup != NULL) {
+ //
+ // Restore the active GHCB from the backup page.
+ //
+ CopyMem (Ghcb, GhcbBackup, sizeof (*Ghcb));
+ }
+
+ SevEsData->VcCount--;
+
+ if (InterruptState) {
+ EnableInterrupts ();
+ }
+
+ return VcRet;
+}
diff --git a/OvmfPkg/Library/CcExitLib/X64/TdVmcallCpuid.nasm b/OvmfPkg/Library/CcExitLib/X64/TdVmcallCpuid.nasm
new file mode 100644
index 000000000000..e7e79885d472
--- /dev/null
+++ b/OvmfPkg/Library/CcExitLib/X64/TdVmcallCpuid.nasm
@@ -0,0 +1,146 @@
+;------------------------------------------------------------------------------
+;*
+;* Copyright (c) 2020 - 2021, Intel Corporation. All rights reserved.<BR>
+;* SPDX-License-Identifier: BSD-2-Clause-Patent
+;*
+;*
+;------------------------------------------------------------------------------
+
+DEFAULT REL
+SECTION .text
+
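+; RCX bitmap selecting which GPRs are exposed to the VMM for the TDVMCALL.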
+%define TDVMCALL_EXPOSE_REGS_MASK 0xffec
+%define TDVMCALL 0x0
+%define EXIT_REASON_CPUID 0xa
+
+%macro tdcall 0
+ db 0x66,0x0f,0x01,0xcc
+%endmacro
+
+%macro tdcall_push_regs 0
+ push rbp
+ mov rbp, rsp
+ push r15
+ push r14
+ push r13
+ push r12
+ push rbx
+ push rsi
+ push rdi
+%endmacro
+
+%macro tdcall_pop_regs 0
+ pop rdi
+ pop rsi
+ pop rbx
+ pop r12
+ pop r13
+ pop r14
+ pop r15
+ pop rbp
+%endmacro
+
+%define number_of_regs_pushed 8
+%define number_of_parameters 4
+
+;
+; Keep these in sync with tdcall_push_regs/tdcall_pop_regs; the code below
+; uses them to locate the fifth and later parameters on the stack.
+;
+%define first_variable_on_stack_offset \
+ ((number_of_regs_pushed * 8) + (number_of_parameters * 8) + 8)
+%define second_variable_on_stack_offset \
+ ((first_variable_on_stack_offset) + 8)
+
+%macro tdcall_regs_preamble 2
+ mov rax, %1
+
+ xor rcx, rcx
+ mov ecx, %2
+
+ ; R10 = 0 (standard TDVMCALL)
+
+ xor r10d, r10d
+
+ ; Zero out unused (for standard TDVMCALL) registers to avoid leaking
+ ; secrets to the VMM.
+
+ xor ebx, ebx
+ xor esi, esi
+ xor edi, edi
+
+ xor edx, edx
+ xor ebp, ebp
+ xor r8d, r8d
+ xor r9d, r9d
+ xor r14, r14
+ xor r15, r15
+%endmacro
+
+%macro tdcall_regs_postamble 0
+ xor ebx, ebx
+ xor esi, esi
+ xor edi, edi
+
+ xor ecx, ecx
+ xor edx, edx
+ xor r8d, r8d
+ xor r9d, r9d
+ xor r10d, r10d
+ xor r11d, r11d
+%endmacro
+
+;------------------------------------------------------------------------------
+; 0 => RAX = TDCALL leaf / TDVMCALL
+; M => RCX = TDVMCALL register behavior
+; 0xa => R11 = TDVMCALL function / CPUID
+; RCX => R12 = p1
+; RDX => R13 = p2
+;
+; UINT64
+; EFIAPI
+; CcExitLibTdVmCallCpuid (
+; UINT64 EaxIn, // Rcx
+; UINT64 EcxIn, // Rdx
+; UINT64 *Results // R8
+; )
+global ASM_PFX(CcExitLibTdVmCallCpuid)
+ASM_PFX(CcExitLibTdVmCallCpuid):
+ tdcall_push_regs
+
+ mov r11, EXIT_REASON_CPUID
+ mov r12, rcx
+ mov r13, rdx
+
+ ; Save the *Results pointer
+ push r8
+
+ tdcall_regs_preamble TDVMCALL, TDVMCALL_EXPOSE_REGS_MASK
+
+ tdcall
+
+ ; Ignore return data if the TDCALL reports failure.
+ test rax, rax
+ jnz .no_return_data
+
+ ; Propagate TDVMCALL success/failure to return value.
+ mov rax, r10
+ test rax, rax
+ jnz .no_return_data
+
+ ; Retrieve *Results
+ pop r8
+ test r8, r8
+ jz .no_return_data
+ ; Caller passed in a results buffer; R12-R15 contain EAX-EDX, so store them
+ mov [r8 + 0], r12
+ mov [r8 + 8], r13
+ mov [r8 + 16], r14
+ mov [r8 + 24], r15
+
+.no_return_data:
+ tdcall_regs_postamble
+
+ tdcall_pop_regs
+
+ ret
--
2.29.2.windows.2