From: "Chao Li" <lichao@loongson.cn>
To: devel@edk2.groups.io
Cc: Michael D Kinney <michael.d.kinney@intel.com>,
Liming Gao <gaoliming@byosoft.com.cn>,
Zhiguang Liu <zhiguang.liu@intel.com>,
Baoqi Zhang <zhangbaoqi@loongson.cn>
Subject: [staging/LoongArch RESEND PATCH v1 26/33] MdePkg/BaseSynchronizationLib: LoongArch cache related code.
Date: Wed, 9 Feb 2022 14:56:13 +0800 [thread overview]
Message-ID: <20220209065613.2989479-1-lichao@loongson.cn> (raw)
Support LoongArch synchronization functions.
Cc: Michael D Kinney <michael.d.kinney@intel.com>
Cc: Liming Gao <gaoliming@byosoft.com.cn>
Cc: Zhiguang Liu <zhiguang.liu@intel.com>
Signed-off-by: Chao Li <lichao@loongson.cn>
Co-authored-by: Baoqi Zhang <zhangbaoqi@loongson.cn>
---
.../BaseSynchronizationLib.inf | 5 +
.../LoongArch64/Synchronization.c | 239 ++++++++++++++++++
2 files changed, 244 insertions(+)
create mode 100644 MdePkg/Library/BaseSynchronizationLib/LoongArch64/Synchronization.c
diff --git a/MdePkg/Library/BaseSynchronizationLib/BaseSynchronizationLib.inf b/MdePkg/Library/BaseSynchronizationLib/BaseSynchronizationLib.inf
index 83d5b8ed7c..3cf5b6d4b1 100755
--- a/MdePkg/Library/BaseSynchronizationLib/BaseSynchronizationLib.inf
+++ b/MdePkg/Library/BaseSynchronizationLib/BaseSynchronizationLib.inf
@@ -4,6 +4,7 @@
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
# Copyright (c) 2020, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
+# Copyright (c) 2022, Loongson Technology Corporation Limited. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
@@ -83,6 +84,10 @@
Synchronization.c
RiscV64/Synchronization.S
+[Sources.LOONGARCH64]
+ Synchronization.c
+ LoongArch64/Synchronization.c
+
[Packages]
MdePkg/MdePkg.dec
diff --git a/MdePkg/Library/BaseSynchronizationLib/LoongArch64/Synchronization.c b/MdePkg/Library/BaseSynchronizationLib/LoongArch64/Synchronization.c
new file mode 100644
index 0000000000..a191a50c81
--- /dev/null
+++ b/MdePkg/Library/BaseSynchronizationLib/LoongArch64/Synchronization.c
@@ -0,0 +1,239 @@
+/** @file
+ LoongArch synchronization functions.
+
+ Copyright (c) 2022, Loongson Technology Corporation Limited. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Library/DebugLib.h>
+
+/**
+  Performs an atomic compare exchange operation on a 16-bit
+  unsigned integer.
+
+  Performs an atomic compare exchange operation on the 16-bit
+  unsigned integer specified by Value. If Value is equal to
+  CompareValue, then Value is set to ExchangeValue and
+  CompareValue is returned. If Value is not equal to
+  CompareValue, then Value is returned. The compare exchange
+  operation must be performed using MP safe mechanisms.
+
+  @param Value         A pointer to the 16-bit value for the
+                       compare exchange operation.
+  @param CompareValue  16-bit value used in compare operation.
+  @param ExchangeValue 16-bit value used in exchange operation.
+
+  @return The original *Value before exchange.
+
+**/
+UINT16
+EFIAPI
+InternalSyncCompareExchange16 (
+  IN volatile UINT16 *Value,
+  IN UINT16 CompareValue,
+  IN UINT16 ExchangeValue
+  )
+{
+  UINT32 RetValue, Temp, Shift;
+  UINT64 Mask, LocalCompareValue, LocalExchangeValue;
+  volatile UINT32 *Ptr32;
+
+  /*
+   * Check that the pointer is naturally aligned for a 16-bit access.
+   * Note: sizeof (*Value) is the operand size (2 bytes); using
+   * sizeof (Value) would test against the pointer size (8 bytes) and
+   * reject perfectly valid 2-byte-aligned targets.
+   */
+  ASSERT(!((UINT64)Value & (sizeof(*Value) - 1)));
+
+  /* Mask inputs to the correct size (low 16 bits). */
+  Mask = (1ULL << (sizeof(UINT16) * 8)) - 1;
+  LocalCompareValue = ((UINT64)CompareValue) & Mask;
+  LocalExchangeValue = ((UINT64)ExchangeValue) & Mask;
+
+  /*
+   * Calculate a shift & mask that correspond to the value we wish to
+   * compare & exchange within the naturally aligned 4 byte integer
+   * that includes it.
+   */
+  Shift = (UINT64)Value & 0x3;
+  Shift *= 8; /* BITS_PER_BYTE */
+  LocalCompareValue <<= Shift;
+  LocalExchangeValue <<= Shift;
+  Mask <<= Shift;
+
+  /*
+   * Calculate a pointer to the naturally aligned 4 byte integer that
+   * includes our halfword of interest.
+   */
+  Ptr32 = (UINT32 *)((UINT64)Value & ~0x3);
+
+  /*
+   * LL/SC loop on the containing 32-bit word (Linux __cmpxchg_small
+   * pattern):
+   *   and   extracts the 16-bit field for the comparison;
+   *   andn  clears the field while preserving the neighbouring bits;
+   *   or    merges in the new value before the conditional store.
+   * The mask operand must be Mask, not ~Mask: the comparison has to
+   * test the field itself, and andn performs the bit-clear against
+   * the same mask. dbar 0 on the mismatch path preserves barrier
+   * semantics even when no store is performed.
+   */
+  __asm__ __volatile__ (
+    "1:               \n"
+    "ll.w  %0, %3     \n"
+    "and   %1, %0, %4 \n"
+    "bne   %1, %5, 2f \n"
+    "andn  %1, %0, %4 \n"
+    "or    %1, %1, %6 \n"
+    "sc.w  %1, %2     \n"
+    "beqz  %1, 1b     \n"
+    "b     3f         \n"
+    "2:               \n"
+    "dbar 0           \n"
+    "3:               \n"
+    : "=&r" (RetValue), "=&r" (Temp), "=" "ZC" (*Ptr32)
+    : "ZC" (*Ptr32), "Jr" (Mask), "Jr" (LocalCompareValue), "Jr" (LocalExchangeValue)
+    : "memory"
+  );
+
+  /* RetValue is the whole word loaded by ll.w; extract the original
+     16-bit field and shift it back down. */
+  return (RetValue & Mask) >> Shift;
+}
+
+/**
+  Performs an atomic compare exchange operation on a 32-bit
+  unsigned integer.
+
+  Performs an atomic compare exchange operation on the 32-bit
+  unsigned integer specified by Value. If Value is equal to
+  CompareValue, then Value is set to ExchangeValue and
+  CompareValue is returned. If Value is not equal to
+  CompareValue, then Value is returned. The compare exchange
+  operation must be performed using MP safe mechanisms.
+
+  @param Value         A pointer to the 32-bit value for the
+                       compare exchange operation.
+  @param CompareValue  32-bit value used in compare operation.
+  @param ExchangeValue 32-bit value used in exchange operation.
+
+  @return The original *Value before exchange.
+
+**/
+UINT32
+EFIAPI
+InternalSyncCompareExchange32 (
+  IN volatile UINT32 *Value,
+  IN UINT32 CompareValue,
+  IN UINT32 ExchangeValue
+  )
+{
+  UINT32 RetValue;
+  UINT32 Temp;
+
+  /*
+   * LL/SC retry loop. The conditional store MUST go through a scratch
+   * register (Temp): sc.w overwrites its data register with the
+   * success flag (0/1), so issuing move/sc.w on %0 would make the
+   * function return 1 on success instead of the original *Value,
+   * breaking callers that test "return == CompareValue". This mirrors
+   * the Linux __cmpxchg_asm use of a dedicated temporary.
+   * dbar 0 on the mismatch path keeps barrier semantics even when no
+   * store is performed.
+   */
+  __asm__ __volatile__ (
+    "1:               \n"
+    "ll.w  %0, %3     \n"
+    "bne   %0, %4, 2f \n"
+    "move  %1, %5     \n"
+    "sc.w  %1, %2     \n"
+    "beqz  %1, 1b     \n"
+    "b     3f         \n"
+    "2:               \n"
+    "dbar 0           \n"
+    "3:               \n"
+    : "=&r" (RetValue), "=&r" (Temp), "=" "ZC" (*Value)
+    : "ZC" (*Value), "Jr" (CompareValue), "Jr" (ExchangeValue)
+    : "memory"
+  );
+
+  /* RetValue holds the value loaded by ll.w, i.e. the original *Value. */
+  return RetValue;
+}
+
+/**
+  Performs an atomic compare exchange operation on a 64-bit unsigned integer.
+
+  Performs an atomic compare exchange operation on the 64-bit unsigned integer specified
+  by Value. If Value is equal to CompareValue, then Value is set to ExchangeValue and
+  CompareValue is returned. If Value is not equal to CompareValue, then Value is returned.
+  The compare exchange operation must be performed using MP safe mechanisms.
+
+  @param Value         A pointer to the 64-bit value for the compare exchange
+                       operation.
+  @param CompareValue  64-bit value used in compare operation.
+  @param ExchangeValue 64-bit value used in exchange operation.
+
+  @return The original *Value before exchange.
+
+**/
+UINT64
+EFIAPI
+InternalSyncCompareExchange64 (
+  IN volatile UINT64 *Value,
+  IN UINT64 CompareValue,
+  IN UINT64 ExchangeValue
+  )
+{
+  UINT64 RetValue;
+  UINT64 Temp;
+
+  /*
+   * LL/SC retry loop (64-bit). As in the 32-bit variant, the
+   * conditional store MUST use a scratch register: sc.d replaces its
+   * data register with the success flag (0/1), so storing through %0
+   * would return 1 on success instead of the original *Value.
+   * dbar 0 on the mismatch path keeps barrier semantics even when no
+   * store is performed.
+   */
+  __asm__ __volatile__ (
+    "1:               \n"
+    "ll.d  %0, %3     \n"
+    "bne   %0, %4, 2f \n"
+    "move  %1, %5     \n"
+    "sc.d  %1, %2     \n"
+    "beqz  %1, 1b     \n"
+    "b     3f         \n"
+    "2:               \n"
+    "dbar 0           \n"
+    "3:               \n"
+    : "=&r" (RetValue), "=&r" (Temp), "=" "ZC" (*Value)
+    : "ZC" (*Value), "Jr" (CompareValue), "Jr" (ExchangeValue)
+    : "memory"
+  );
+
+  /* RetValue holds the value loaded by ll.d, i.e. the original *Value. */
+  return RetValue;
+}
+
+/**
+  Performs an atomic increment of a 32-bit unsigned integer.
+
+  Performs an atomic increment of the 32-bit unsigned integer specified by
+  Value and returns the incremented value. The increment operation must be
+  performed using MP safe mechanisms.
+
+  @param Value A pointer to the 32-bit value to increment.
+
+  @return The incremented value.
+
+**/
+UINT32
+EFIAPI
+InternalSyncIncrement (
+  IN volatile UINT32 *Value
+  )
+{
+  UINT32 OldValue;
+
+  /*
+   * amadd.w atomically adds %2 (1) to *Value and deposits the ORIGINAL
+   * value into OldValue (%1); dbar 0 orders the operation against
+   * earlier memory accesses.
+   */
+  __asm__ __volatile__(
+    "dbar 0 \n"
+    "amadd.w %1, %2, %0 \n"
+    : "+ZB" (*Value), "=&r" (OldValue)
+    : "r" (1)
+    : "memory"
+  );
+
+  /*
+   * Return OldValue + 1, the result produced by THIS increment.
+   * Re-reading *Value here would be a separate, non-atomic load that
+   * could observe later updates from other processors.
+   */
+  return OldValue + 1;
+}
+
+/**
+  Performs an atomic decrement of a 32-bit unsigned integer.
+
+  Performs an atomic decrement of the 32-bit unsigned integer specified by
+  Value and returns the decremented value. The decrement operation must be
+  performed using MP safe mechanisms.
+
+  @param Value A pointer to the 32-bit value to decrement.
+
+  @return The decremented value.
+
+**/
+UINT32
+EFIAPI
+InternalSyncDecrement (
+  IN volatile UINT32 *Value
+  )
+{
+  UINT32 OldValue;
+
+  /*
+   * amadd.w atomically adds %2 (-1) to *Value and deposits the
+   * ORIGINAL value into OldValue (%1); dbar 0 orders the operation
+   * against earlier memory accesses.
+   */
+  __asm__ __volatile__(
+    "dbar 0 \n"
+    "amadd.w %1, %2, %0 \n"
+    : "+ZB" (*Value), "=&r" (OldValue)
+    : "r" (-1)
+    : "memory"
+  );
+
+  /*
+   * Return OldValue - 1, the result produced by THIS decrement.
+   * Re-reading *Value here would be a separate, non-atomic load that
+   * could observe later updates from other processors.
+   */
+  return OldValue - 1;
+}
--
2.27.0
next reply other threads:[~2022-02-09 6:56 UTC|newest]
Thread overview: 3+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-02-09 6:56 Chao Li [this message]
2022-04-08 11:02 ` [edk2-devel] [staging/LoongArch RESEND PATCH v1 26/33] MdePkg/BaseSynchronizationLib: LoongArch cache related code Abner Chang
2022-04-12 10:02 ` Chao Li
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-list from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20220209065613.2989479-1-lichao@loongson.cn \
--to=devel@edk2.groups.io \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox