public inbox for devel@edk2.groups.io
From: "Dhaval Sharma" <dhaval@rivosinc.com>
To: devel@edk2.groups.io
Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>,
	Jiewen Yao <jiewen.yao@intel.com>,
	Jordan Justen <jordan.l.justen@intel.com>,
	Gerd Hoffmann <kraxel@redhat.com>,
	Sunil V L <sunilvl@ventanamicro.com>,
	Andrei Warkentin <andrei.warkentin@intel.com>
Subject: [PATCH v4 1/1] MdePkg:Implement RISCV CMO
Date: Thu, 13 Jul 2023 15:03:31 +0530	[thread overview]
Message-ID: <20230713093331.267164-2-dhaval@rivosinc.com> (raw)
In-Reply-To: <20230713093331.267164-1-dhaval@rivosinc.com>

From: Dhaval Sharma <dhaval@rivosinc.com>

Implement code to support the Cache Management Operations (CMO) defined
by the RISC-V CMO specification: https://github.com/riscv/riscv-CMOs

Notes:
1. CMO only supports block-based operations; complete cache
   flush/invalidate/clean operations are not available. In that case we
   fall back on the fence.i instruction.
2. Rely on platform initialization having set up CMO; this implementation
   only checks whether it is enabled.
3. To avoid a toolchain dependency on CMO instruction support, the
   instructions are injected as raw byte code (see the encoding sketch
   after this list).
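
   For reference, a sketch of how the injected opcodes are constructed
   (field layout per the CMO spec; the CBO_ENCODE macro below is only
   illustrative and is not part of this patch):

     // CBO instructions use the MISC-MEM major opcode (0x0F) with
     // funct3 = 0x2; the operation selector sits in the funct12 field.
     #define CBO_ENCODE(Funct12, Rs1) \
       (((Funct12) << 20) | ((Rs1) << 15) | (0x2 << 12) | 0x0F)

     // With rs1 = a0 (x10):
     //   CBO_ENCODE (0x000, 10) == 0x0005200F   ->  cbo.inval (a0)
     //   CBO_ENCODE (0x001, 10) == 0x0015200F   ->  cbo.clean (a0)
     //   CBO_ENCODE (0x002, 10) == 0x0025200F   ->  cbo.flush (a0)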

Test:
1. Ensured the correct instructions are reflected in the generated asm.
2. Able to boot the platform with the RiscVVirtQemu config.
3. Not able to verify the actual instructions on hardware, as QEMU
   ignores actual cache operations.

Cc: Ard Biesheuvel <ardb+tianocore@kernel.org>
Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Sunil V L <sunilvl@ventanamicro.com>
Cc: Andrei Warkentin <andrei.warkentin@intel.com>
Signed-off-by: Dhaval Sharma <dhaval@rivosinc.com>
---

Notes:
    v4:
    - Removed CMO specific directory in Base Lib
    - Implemented compiler independent code for CMO
    - Merged CMO implementation with fence.i
    - Added logic to confirm CMO is enabled (a conceptual C sketch of the
      check follows below)
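
    A conceptual C sketch of that enable check (the patch implements it in
    assembly; RiscVCsrRead below is a hypothetical helper, and the masks
    come from RiscVEncoding.h):

      // TRUE when cbo.clean/cbo.flush are permitted in the current mode
      BOOLEAN
      IsCbcfeEnabled (VOID)
      {
        return (RiscVCsrRead (CSR_SENVCFG) & SENVCFG_CBCFE) != 0;
      }

      // TRUE when cbo.inval is permitted in the current mode
      BOOLEAN
      IsCbiEnabled (VOID)
      {
        return (RiscVCsrRead (CSR_SENVCFG) & SENVCFG_CBIE) != 0;
      }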

 MdePkg/Library/BaseLib/BaseLib.inf                  |   2 +-
 MdePkg/Include/Register/RiscV64/RiscVEncoding.h     |   4 +
 MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c | 200 ++++++++++++++++++--
 MdePkg/Library/BaseLib/RiscV64/FlushCache.S         |  21 --
 MdePkg/Library/BaseLib/RiscV64/RiscVCacheMgmt.S     |  64 +++++++
 5 files changed, 254 insertions(+), 37 deletions(-)

diff --git a/MdePkg/Library/BaseLib/BaseLib.inf b/MdePkg/Library/BaseLib/BaseLib.inf
index 03c7b02e828b..53389389448c 100644
--- a/MdePkg/Library/BaseLib/BaseLib.inf
+++ b/MdePkg/Library/BaseLib/BaseLib.inf
@@ -400,7 +400,7 @@ [Sources.RISCV64]
   RiscV64/RiscVCpuBreakpoint.S      | GCC
   RiscV64/RiscVCpuPause.S           | GCC
   RiscV64/RiscVInterrupt.S          | GCC
-  RiscV64/FlushCache.S              | GCC
+  RiscV64/RiscVCacheMgmt.S          | GCC
   RiscV64/CpuScratch.S              | GCC
   RiscV64/ReadTimer.S               | GCC
   RiscV64/RiscVMmu.S                | GCC
diff --git a/MdePkg/Include/Register/RiscV64/RiscVEncoding.h b/MdePkg/Include/Register/RiscV64/RiscVEncoding.h
index 5c2989b797bf..ea1493578bd5 100644
--- a/MdePkg/Include/Register/RiscV64/RiscVEncoding.h
+++ b/MdePkg/Include/Register/RiscV64/RiscVEncoding.h
@@ -85,6 +85,10 @@
 /* Supervisor Configuration */
 #define CSR_SENVCFG  0x10a
 
+/* Cache Block Operation (CBO) enable bits in senvcfg */
+#define SENVCFG_CBCFE  0x40UL
+#define SENVCFG_CBIE   0x30UL
+
 /* Supervisor Trap Handling */
 #define CSR_SSCRATCH  0x140
 #define CSR_SEPC      0x141
diff --git a/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c b/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c
index d08fb9f193ca..8b853e5b69fa 100644
--- a/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c
+++ b/MdePkg/Library/BaseCacheMaintenanceLib/RiscVCache.c
@@ -1,7 +1,8 @@
 /** @file
-  RISC-V specific functionality for cache.
+  Implement RISC-V Cache Management Operations
 
   Copyright (c) 2020, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
+  Copyright (c) 2023, Rivos Inc. All rights reserved.<BR>
 
   SPDX-License-Identifier: BSD-2-Clause-Patent
 **/
@@ -10,13 +11,21 @@
 #include <Library/BaseLib.h>
 #include <Library/DebugLib.h>
 
+#define RV64_CACHE_BLOCK_SIZE  64
+
+typedef enum {
+  Clean,
+  Flush,
+  Invld,
+} CACHE_OP;
+
 /**
   RISC-V invalidate instruction cache.
 
 **/
 VOID
 EFIAPI
-RiscVInvalidateInstCacheAsm (
+RiscVInvalidateInstCacheAsm_Fence (
   VOID
   );
 
@@ -26,13 +35,144 @@ RiscVInvalidateInstCacheAsm (
 **/
 VOID
 EFIAPI
-RiscVInvalidateDataCacheAsm (
+RiscVInvalidateDataCacheAsm_Fence (
   VOID
   );
 
+/**
+  RISC-V flush cache block. Atomically perform a clean operation
+  followed by an invalidate operation
+
+**/
+VOID
+EFIAPI
+RiscVCpuCacheFlushAsm_Cbo (
+  UINTN
+  );
+
+/**
+Perform a write transfer to another cache or to memory if the
+data in the copy of the cache block have been modified by a store
+operation
+
+**/
+VOID
+EFIAPI
+RiscVCpuCacheCleanAsm_Cbo (
+  UINTN
+  );
+
+/**
+Deallocate the copy of the cache block
+
+**/
+VOID
+EFIAPI
+RiscVCpuCacheInvalAsm_Cbo (
+  UINTN
+  );
+
+/**
+Verify CBOs are supported by this HW
+CBCFE == Cache Block Clean and Flush instruction Enable
+CBIE == Cache Block Invalidate instruction Enable
+
+**/
+UINTN
+RiscvIsCbcfeEnabledAsm (
+  VOID
+  );
+
+UINTN
+RiscvIsCbiEnabledAsm (
+  VOID
+  );
+
+/**
+  Performs the required cache operation on the cache lines in the cache
+  coherency domain of the calling CPU. If Address is not aligned on a cache
+  line boundary, then the entire cache line containing Address is operated
+  on. If Address + Length is not aligned on a cache line boundary, then the
+  entire cache line containing Address + Length - 1 is operated on.
+
+  If Length is greater than (MAX_ADDRESS - Address + 1), then ASSERT().
+
+  @param  Address The base address of the cache lines to operate on.
+          If the CPU is in a physical addressing mode, then Address is a
+          physical address. If the CPU is in a virtual addressing mode,
+          then Address is a virtual address.
+
+  @param  Length  The number of bytes on which to perform the cache
+          operation.
+
+  @param  Op  Type of CMO operation to be performed
+
+  @return Address.
+
+**/
+VOID *
+EFIAPI
+CacheOpCacheRange (
+  IN VOID      *Address,
+  IN UINTN     Length,
+  IN CACHE_OP  Op
+  )
+{
+  UINTN  CacheLineSize;
+  UINTN  Start;
+  UINTN  End;
+
+  if (Length == 0) {
+    return Address;
+  }
+
+  ASSERT ((Length - 1) <= (MAX_ADDRESS - (UINTN)Address));
+
+  //
+  // RISC-V CMO operates on fixed-size cache blocks (64 bytes assumed here)
+  //
+  CacheLineSize = RV64_CACHE_BLOCK_SIZE;
+
+  Start = (UINTN)Address;
+  //
+  // Calculate the cache line alignment
+  //
+  End    = (Start + Length + (CacheLineSize - 1)) & ~(CacheLineSize - 1);
+  Start &= ~((UINTN)CacheLineSize - 1);
+
+  DEBUG (
+    (DEBUG_INFO,
+     "%a Performing Cache Management Operation %d \n", __func__, Op)
+    );
+
+  do {
+    switch (Op) {
+      case Invld:
+        RiscVCpuCacheInvalAsm_Cbo (Start);
+        break;
+      case Flush:
+        RiscVCpuCacheFlushAsm_Cbo (Start);
+        break;
+      case Clean:
+        RiscVCpuCacheCleanAsm_Cbo (Start);
+        break;
+      default:
+        DEBUG ((DEBUG_ERROR, "%a:RISC-V unsupported operation\n", __func__));
+        break;
+    }
+
+    Start = Start + CacheLineSize;
+  } while (Start != End);
+
+  return Address;
+}
+
 /**
   Invalidates the entire instruction cache in cache coherency domain of the
-  calling CPU.
+  calling CPU. RISC-V does not currently define a CBO operation that can
+  invalidate the entire I-cache, so the fence.i instruction is used for now.
+  Note that fence.i may or may not provide full I-cache invalidation on all
+  implementations.
 
 **/
 VOID
@@ -41,7 +181,7 @@ InvalidateInstructionCache (
   VOID
   )
 {
-  RiscVInvalidateInstCacheAsm ();
+  RiscVInvalidateInstCacheAsm_Fence ();
 }
 
 /**
@@ -76,12 +216,17 @@ InvalidateInstructionCacheRange (
   IN UINTN  Length
   )
 {
-  DEBUG (
-    (DEBUG_WARN,
-     "%a:RISC-V unsupported function.\n"
-     "Invalidating the whole instruction cache instead.\n", __func__)
-    );
-  InvalidateInstructionCache ();
+  if (RiscvIsCbiEnabledAsm () == RETURN_SUCCESS) {
+    CacheOpCacheRange (Address, Length, Invld);
+  } else {
+    DEBUG (
+      (DEBUG_WARN,
+       "%a:RISC-V unsupported function.\n"
+       "Invalidating the whole instruction cache instead.\n", __func__)
+      );
+    InvalidateInstructionCache ();
+  }
+
   return Address;
 }
 
@@ -137,7 +282,12 @@ WriteBackInvalidateDataCacheRange (
   IN      UINTN  Length
   )
 {
-  DEBUG ((DEBUG_ERROR, "%a:RISC-V unsupported function.\n", __func__));
+  if (RiscvIsCbcfeEnabledAsm () == RETURN_SUCCESS) {
+    CacheOpCacheRange (Address, Length, Flush);
+  } else {
+    DEBUG ((DEBUG_ERROR, "%a:RISC-V unsupported function.\n", __func__));
+  }
+
   return Address;
 }
 
@@ -192,7 +342,12 @@ WriteBackDataCacheRange (
   IN      UINTN  Length
   )
 {
-  DEBUG ((DEBUG_ERROR, "%a:RISC-V unsupported function.\n", __func__));
+  if (RiscvIsCbcfeEnabledAsm () == RETURN_SUCCESS) {
+    CacheOpCacheRange (Address, Length, Clean);
+  } else {
+    DEBUG ((DEBUG_ERROR, "%a:RISC-V unsupported function.\n", __func__));
+  }
+
   return Address;
 }
 
@@ -213,7 +368,12 @@ InvalidateDataCache (
   VOID
   )
 {
-  RiscVInvalidateDataCacheAsm ();
+  DEBUG (
+    (DEBUG_WARN,
+     "%a:RISC-V unsupported function.\n"
+     "Invalidating the whole instruction cache instead.\n", __func__)
+    );
+  RiscVInvalidateDataCacheAsm_Fence ();
 }
 
 /**
@@ -250,6 +410,16 @@ InvalidateDataCacheRange (
   IN      UINTN  Length
   )
 {
-  DEBUG ((DEBUG_ERROR, "%a:RISC-V unsupported function.\n", __func__));
+  if (RiscvIsCbiEnabledAsm () == RETURN_SUCCESS) {
+    CacheOpCacheRange (Address, Length, Invld);
+  } else {
+    DEBUG (
+      (DEBUG_WARN,
+       "%a:RISC-V unsupported function.\n"
+       "Invalidating the whole instruction cache instead.\n", __func__)
+      );
+    InvalidateDataCache ();
+  }
+
   return Address;
 }
diff --git a/MdePkg/Library/BaseLib/RiscV64/FlushCache.S b/MdePkg/Library/BaseLib/RiscV64/FlushCache.S
deleted file mode 100644
index 7c10fdd268af..000000000000
--- a/MdePkg/Library/BaseLib/RiscV64/FlushCache.S
+++ /dev/null
@@ -1,21 +0,0 @@
-//------------------------------------------------------------------------------
-//
-// RISC-V cache operation.
-//
-// Copyright (c) 2020, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
-//
-// SPDX-License-Identifier: BSD-2-Clause-Patent
-//
-//------------------------------------------------------------------------------
-
-.align 3
-ASM_GLOBAL ASM_PFX(RiscVInvalidateInstCacheAsm)
-ASM_GLOBAL ASM_PFX(RiscVInvalidateDataCacheAsm)
-
-ASM_PFX(RiscVInvalidateInstCacheAsm):
-    fence.i
-    ret
-
-ASM_PFX(RiscVInvalidateDataCacheAsm):
-    fence
-    ret
diff --git a/MdePkg/Library/BaseLib/RiscV64/RiscVCacheMgmt.S b/MdePkg/Library/BaseLib/RiscV64/RiscVCacheMgmt.S
new file mode 100644
index 000000000000..ecf391632221
--- /dev/null
+++ b/MdePkg/Library/BaseLib/RiscV64/RiscVCacheMgmt.S
@@ -0,0 +1,64 @@
+//------------------------------------------------------------------------------
+//
+// RISC-V cache operation.
+//
+// Copyright (c) 2020, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
+// Copyright (c) 2022, Rivos Inc. All rights reserved.<BR>
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+//
+//------------------------------------------------------------------------------
+#include <Register/RiscV64/RiscVImpl.h>
+
+.align 3
+ASM_GLOBAL ASM_PFX(RiscVInvalidateInstCacheAsm_Fence)
+ASM_GLOBAL ASM_PFX(RiscVInvalidateDataCacheAsm_Fence)
+
+ASM_PFX(RiscVInvalidateInstCacheAsm_Fence):
+    fence.i
+    ret
+
+ASM_PFX(RiscVInvalidateDataCacheAsm_Fence):
+    fence
+    ret
+
+ASM_GLOBAL ASM_PFX (RiscVCpuCacheFlushAsm_Cbo)
+ASM_PFX (RiscVCpuCacheFlushAsm_Cbo):
+
+  .long 0x0025200f    // cbo.flush (a0)
+  ret
+
+ASM_GLOBAL ASM_PFX (RiscVCpuCacheCleanAsm_Cbo)
+ASM_PFX (RiscVCpuCacheCleanAsm_Cbo):
+  .long 0x0015200f    // cbo.clean (a0)
+  ret
+
+ASM_GLOBAL ASM_PFX (RiscVCpuCacheInvalAsm_Cbo)
+ASM_PFX (RiscVCpuCacheInvalAsm_Cbo):
+  .long 0x0005200f    // cbo.inval (a0)
+  ret
+
+ASM_GLOBAL ASM_PFX(RiscvIsCbiEnabledAsm)
+ASM_PFX(RiscvIsCbiEnabledAsm):
+    li    a0, 3
+    csrr  a1, CSR_SENVCFG
+    and   a1, a1, SENVCFG_CBIE
+    beqz  a1, skip
+
+    and   a1, a1, 0x10
+    beqz  a1, skip
+
+    mv    a0, x0
+skip:
+    ret
+
+ASM_GLOBAL ASM_PFX(RiscvIsCbcfeEnabledAsm)
+ASM_PFX(RiscvIsCbcfeEnabledAsm):
+    li    a0, 3
+    csrr  a1, CSR_SENVCFG
+    and   a1, a1, SENVCFG_CBCFE
+    beqz  a1, next
+
+    mv    a0, x0
+next:
+    ret
-- 
2.34.1



Thread overview: 4+ messages
2023-07-13  9:33 [PATCH v4 0/1] MdePkg:Implement RISCV CMO Dhaval Sharma
2023-07-13  9:33 ` Dhaval Sharma [this message]
2023-07-24  4:33   ` [edk2-devel] [PATCH v4 1/1] " Sunil V L
2023-07-27  5:47     ` Dhaval Sharma
