Hi Mike,
I'm guessing you're a little busy, or this one slipped through the cracks. :)
I have converted the inline assembly into a separate assembly (.S) file; please review this patch again, thanks!


Thanks,
Chao
--------

On Sep 27, 2022, at 7:27 PM, Chao Li <lichao@loongson.cn> wrote:
Hi Mike,
I have converted the inline assembly into a separate assembly (.S) file; please review this patch again, thanks!


Thanks,
Chao
--------

On Sep 27, 2022, at 7:13 PM, Chao Li <lichao@loongson.cn> wrote:
REF: https://bugzilla.tianocore.org/show_bug.cgi?id=4053

Support LoongArch synchronization functions.

Cc: Michael D Kinney <michael.d.kinney@intel.com>
Cc: Liming Gao <gaoliming@byosoft.com.cn>
Cc: Zhiguang Liu <zhiguang.liu@intel.com>

Signed-off-by: Chao Li <lichao@loongson.cn>
Co-authored-by: Baoqi Zhang <zhangbaoqi@loongson.cn>
---
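Notes (for review only, not part of the commit message):

Since LoongArch has no 16-bit ll/sc pair, the 16-bit compare-exchange is
widened to the naturally aligned 32-bit word that contains the target
halfword. Below is a minimal host-side C sketch of that mask/shift
technique; EmulatedCompareExchange16 is a hypothetical name, and the GCC
builtin __sync_val_compare_and_swap only stands in for the ll.w/sc.w loop
in AsmSynchronization.S.

  #include <assert.h>
  #include <stdint.h>

  static uint16_t
  EmulatedCompareExchange16 (
    volatile uint16_t  *Value,
    uint16_t           Cmp,
    uint16_t           Xchg
    )
  {
    uint64_t           Mask  = 0xFFFFULL;                    /* GENMASK(15,0), as in the patch */
    uint32_t           Shift = ((uintptr_t)Value & 0x3) * 8; /* bit offset within the word     */
    uint64_t           LocalCmp  = ((uint64_t)Cmp  & Mask) << Shift;
    uint64_t           LocalXchg = ((uint64_t)Xchg & Mask) << Shift;
    volatile uint32_t  *Ptr32;
    uint32_t           Old;
    uint32_t           New;

    Mask <<= Shift;
    Ptr32  = (volatile uint32_t *)((uintptr_t)Value & ~(uintptr_t)0x3);

    do {
      Old = *Ptr32;
      if ((Old & Mask) != LocalCmp) {
        break;                              /* mismatch: no store, return current value */
      }
      New = (uint32_t)((Old & ~Mask) | LocalXchg);
    } while (__sync_val_compare_and_swap (Ptr32, Old, New) != Old);

    return (uint16_t)((Old & Mask) >> Shift);
  }

  int
  main (void)
  {
    volatile uint16_t  V = 7;

    assert (EmulatedCompareExchange16 (&V, 7, 9) == 7 && V == 9);  /* hit  */
    assert (EmulatedCompareExchange16 (&V, 7, 1) == 9 && V == 9);  /* miss */
    return 0;
  }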
 .../BaseSynchronizationLib.inf                |   6 +
 .../LoongArch64/AsmSynchronization.S          | 122 +++++++++
 .../LoongArch64/Synchronization.c             | 233 ++++++++++++++++++
 3 files changed, 361 insertions(+)
create mode 100644 MdePkg/Library/BaseSynchronizationLib/LoongArch64/AsmSynchronization.S
create mode 100644 MdePkg/Library/BaseSynchronizationLib/LoongArch64/Synchronization.c
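For context (also not part of the commit): these Internal* routines back
the public SynchronizationLib entry points in MdePkg. A minimal caller,
assuming an ordinary edk2 module that links BaseSynchronizationLib
(TakeReference and TryLock are made-up names):

  #include <Library/SynchronizationLib.h>

  volatile UINT32  mRefCount = 0;

  VOID
  TakeReference (
    VOID
    )
  {
    //
    // MP-safe on LoongArch64 via AsmInternalSyncIncrement.
    //
    InterlockedIncrement (&mRefCount);
  }

  BOOLEAN
  TryLock (
    IN volatile UINT32  *Lock
    )
  {
    //
    // Atomically move *Lock from 0 to 1. The return value is the original
    // *Lock, so 0 means the lock was acquired.
    //
    return (BOOLEAN)(InterlockedCompareExchange32 (Lock, 0, 1) == 0);
  }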

diff --git a/MdePkg/Library/BaseSynchronizationLib/BaseSynchronizationLib.inf b/MdePkg/Library/BaseSynchronizationLib/BaseSynchronizationLib.inf
index 02ba12961a..dd66ec1d03 100755
--- a/MdePkg/Library/BaseSynchronizationLib/BaseSynchronizationLib.inf
+++ b/MdePkg/Library/BaseSynchronizationLib/BaseSynchronizationLib.inf
@@ -4,6 +4,7 @@
 # Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
 # Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
 # Copyright (c) 2020, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
+# Copyright (c) 2022, Loongson Technology Corporation Limited. All rights reserved.<BR>
 #
 # SPDX-License-Identifier: BSD-2-Clause-Patent
 #
@@ -82,6 +83,11 @@
   Synchronization.c
   RiscV64/Synchronization.S

+[Sources.LOONGARCH64]
+  Synchronization.c
+  LoongArch64/Synchronization.c | GCC
+  LoongArch64/AsmSynchronization.S | GCC
+
 [Packages]
   MdePkg/MdePkg.dec

diff --git a/MdePkg/Library/BaseSynchronizationLib/LoongArch64/AsmSynchronization.S b/MdePkg/Library/BaseSynchronizationLib/LoongArch64/AsmSynchronization.S
new file mode 100644
index 0000000000..3f1b06172d
--- /dev/null
+++ b/MdePkg/Library/BaseSynchronizationLib/LoongArch64/AsmSynchronization.S
@@ -0,0 +1,122 @@
+#------------------------------------------------------------------------------
+#
+# LoongArch synchronization ASM functions.
+#
+# Copyright (c) 2022, Loongson Technology Corporation Limited. All rights reserved.<BR>
+#
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+#------------------------------------------------------------------------------
+
+ASM_GLOBAL ASM_PFX(AsmInternalSyncCompareExchange16)
+ASM_GLOBAL ASM_PFX(AsmInternalSyncCompareExchange32)
+ASM_GLOBAL ASM_PFX(AsmInternalSyncCompareExchange64)
+ASM_GLOBAL ASM_PFX(AsmInternalSyncIncrement)
+ASM_GLOBAL ASM_PFX(AsmInternalSyncDecrement)
+
+/**
+UINT32
+EFIAPI
+AsmInternalSyncCompareExchange16 (
+  IN volatile UINT32  *Ptr32,
+  IN UINT64           Mask,
+  IN UINT64           LocalCompareValue,
+  IN UINT64           LocalExchangeValue
+  )
+**/
+ASM_PFX(AsmInternalSyncCompareExchange16):
+1:
+  ll.w   $t0, $a0, 0x0    # load-linked the aligned 32-bit word
+  and    $t1, $t0, $a1    # isolate the 16-bit field of interest
+  bne    $t1, $a2, 2f     # mismatch: bail out without storing
+  andn   $t1, $t0, $a1    # clear the field in the loaded word
+  or     $t1, $t1, $a3    # merge in the pre-shifted exchange value
+  sc.w   $t1, $a0, 0x0    # store-conditional; $t1 = 1 on success
+  beqz   $t1, 1b          # reservation lost: retry
+  b      3f
+2:
+  dbar   0                # full barrier on the failure path
+3:
+  move   $a0, $t0         # return the originally loaded word
+  jirl   $zero, $ra, 0
+
+/**
+UINT32
+EFIAPI
+AsmInternalSyncCompareExchange32 (
+  IN volatile UINT32  *Value,
+  IN UINT64           CompareValue,
+  IN UINT64           ExchangeValue
+  )
+**/
+ASM_PFX(AsmInternalSyncCompareExchange32):
+1:
+  ll.w   $t0, $a0, 0x0    # load-linked the current value
+  bne    $t0, $a1, 2f     # mismatch with CompareValue: bail out
+  move   $t1, $a2         # stage ExchangeValue in a scratch register
+  sc.w   $t1, $a0, 0x0    # store-conditional; $t1 = 1 on success
+  beqz   $t1, 1b          # reservation lost: retry
+  b      3f               # $t0 still holds the original value
+2:
+  dbar   0                # full barrier on the failure path
+3:
+  move   $a0, $t0         # return the last observed value
+  jirl   $zero, $ra, 0
+
+/**
+UINT64
+EFIAPI
+AsmInternalSyncCompareExchange64 (
+  IN volatile UINT64  *Value,
+  IN UINT64           CompareValue,
+  IN UINT64           ExchangeValue
+  )
+**/
+ASM_PFX(AsmInternalSyncCompareExchange64):
+1:
+  ll.d   $t0, $a0, 0x0    # load-linked the current value
+  bne    $t0, $a1, 2f     # mismatch with CompareValue: bail out
+  move   $t1, $a2         # stage ExchangeValue in a scratch register
+  sc.d   $t1, $a0, 0x0    # store-conditional; $t1 = 1 on success
+  beqz   $t1, 1b          # reservation lost: retry
+  b      3f               # $t0 still holds the original value
+2:
+  dbar   0                # full barrier on the failure path
+3:
+  move   $a0, $t0         # return the last observed value
+  jirl   $zero, $ra, 0
+
+/**
+UINT32
+EFIAPI
+AsmInternalSyncIncrement (
+  IN volatile UINT32  *Value
+  )
+**/
+ASM_PFX(AsmInternalSyncIncrement):
+  move     $t0, $a0         # $t0 = Value
+  dbar     0                # full barrier before the atomic add
+  li.w     $t2, 1           # addend
+  # amadd.w writes the pre-add value into $t1 (not needed here)
+  amadd.w  $t1, $t2, $t0    # atomically *Value += 1
+
+  ld.w     $a0, $t0, 0x0    # re-read and return the updated value
+  jirl     $zero, $ra, 0
+
+/**
+UINT32
+EFIAPI
+AsmInternalSyncDecrement (
+  IN volatile UINT32  *Value
+  )
+**/
+ASM_PFX(AsmInternalSyncDecrement):
+  move     $t0, $a0         # $t0 = Value
+  dbar     0                # full barrier before the atomic add
+  li.w     $t2, -1          # addend
+  # amadd.w writes the pre-add value into $t1 (not needed here)
+  amadd.w  $t1, $t2, $t0    # atomically *Value -= 1
+
+  ld.w     $a0, $t0, 0x0    # re-read and return the updated value
+  jirl     $zero, $ra, 0
+.end
diff --git a/MdePkg/Library/BaseSynchronizationLib/LoongArch64/Synchronization.c b/MdePkg/Library/BaseSynchronizationLib/LoongArch64/Synchronization.c
new file mode 100644
index 0000000000..d696c8ce10
--- /dev/null
+++ b/MdePkg/Library/BaseSynchronizationLib/LoongArch64/Synchronization.c
@@ -0,0 +1,233 @@
+/** @file
+  LoongArch synchronization functions.
+
+  Copyright (c) 2022, Loongson Technology Corporation Limited. All rights reserved.<BR>
+
+  SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Library/DebugLib.h>
+
+UINT32
+EFIAPI
+AsmInternalSyncCompareExchange16 (
+  IN volatile UINT32  *Ptr32,
+  IN UINT64           Mask,
+  IN UINT64           LocalCompareValue,
+  IN UINT64           LocalExchangeValue
+  );
+
+UINT32
+EFIAPI
+AsmInternalSyncCompareExchange32 (
+  IN volatile UINT32  *Value,
+  IN UINT64           CompareValue,
+  IN UINT64           ExchangeValue
+  );
+
+UINT64
+EFIAPI
+AsmInternalSyncCompareExchange64 (
+  IN volatile UINT64  *Value,
+  IN UINT64           CompareValue,
+  IN UINT64           ExchangeValue
+  );
+
+UINT32
+EFIAPI
+AsmInternalSyncIncrement (
+  IN volatile UINT32  *Value
+  );
+
+UINT32
+EFIAPI
+AsmInternalSyncDecrement (
+  IN volatile UINT32  *Value
+  );
+
+/**
+  Performs an atomic compare exchange operation on a 16-bit
+  unsigned integer.
+
+  Performs an atomic compare exchange operation on the 16-bit
+  unsigned integer specified by Value. If Value is equal to
+  CompareValue, then Value is set to ExchangeValue and
+  CompareValue is returned. If Value is not equal to
+  CompareValue, then Value is returned. The compare exchange
+  operation must be performed using MP safe mechanisms.
+
+  @param[in]  Value          A pointer to the 16-bit value for the
+                             compare exchange operation.
+  @param[in]  CompareValue   16-bit value used in compare operation.
+  @param[in]  ExchangeValue  16-bit value used in exchange operation.
+
+  @return The original *Value before exchange.
+
+**/
+UINT16
+EFIAPI
+InternalSyncCompareExchange16 (
+  IN volatile UINT16  *Value,
+  IN UINT16           CompareValue,
+  IN UINT16           ExchangeValue
+  )
+{
+  UINT32           RetValue;
+  UINT32           Shift;
+  UINT64           Mask;
+  UINT64           LocalCompareValue;
+  UINT64           LocalExchangeValue;
+  volatile UINT32  *Ptr32;
+
+  /* Check that the pointer is naturally aligned */
+  ASSERT (((UINT64)Value & (sizeof (*Value) - 1)) == 0);
+
+  /* Mask inputs to the correct size; Mask is GENMASK(15, 0), i.e. 0xFFFF. */
+  Mask               = (((~0UL) - (1UL << (0)) + 1) & (~0UL >> (64 - 1 - ((sizeof (UINT16) * 8) - 1))));
+  LocalCompareValue  = ((UINT64)CompareValue) & Mask;
+  LocalExchangeValue = ((UINT64)ExchangeValue) & Mask;
+
+  /*
+   * Calculate a shift & mask that correspond to the value we wish to
+   * compare & exchange within the naturally aligned 4 byte integer
+   * that includes it.
+   */
+  Shift  = (UINT64)Value & 0x3;
+  Shift *= 8; /* BITS_PER_BYTE */
+  LocalCompareValue  <<= Shift;
+  LocalExchangeValue <<= Shift;
+  Mask               <<= Shift;
+
+  /*
+   * Calculate a pointer to the naturally aligned 4 byte integer that
+   * includes our byte of interest, and load its value.
+   */
+  Ptr32 = (UINT32 *)((UINT64)Value & ~0x3);
+
+  RetValue = AsmInternalSyncCompareExchange16 (
+               Ptr32,
+               Mask,
+               LocalCompareValue,
+               LocalExchangeValue
+               );
+
+  return (RetValue & Mask) >> Shift;
+}
+
+/**
+  Performs an atomic compare exchange operation on a 32-bit
+  unsigned integer.
+
+  Performs an atomic compare exchange operation on the 32-bit
+  unsigned integer specified by Value. If Value is equal to
+  CompareValue, then Value is set to ExchangeValue and
+  CompareValue is returned. If Value is not equal to
+  CompareValue, then Value is returned. The compare exchange
+  operation must be performed using MP safe mechanisms.
+
+  @param[in]  Value          A pointer to the 32-bit value for the
+                             compare exchange operation.
+  @param[in]  CompareValue   32-bit value used in compare operation.
+  @param[in]  ExchangeValue  32-bit value used in exchange operation.
+
+  @return The original *Value before exchange.
+
+**/
+UINT32
+EFIAPI
+InternalSyncCompareExchange32 (
+  IN volatile UINT32  *Value,
+  IN UINT32           CompareValue,
+  IN UINT32           ExchangeValue
+  )
+{
+  UINT32  RetValue;
+
+  RetValue = AsmInternalSyncCompareExchange32 (
+               Value,
+               CompareValue,
+               ExchangeValue
+               );
+
+  return RetValue;
+}
+
+/**
+  Performs an atomic compare exchange operation on a 64-bit unsigned integer.
+
+  Performs an atomic compare exchange operation on the 64-bit unsigned integer specified
+  by Value. If Value is equal to CompareValue, then Value is set to ExchangeValue and
+  CompareValue is returned. If Value is not equal to CompareValue, then Value is returned.
+  The compare exchange operation must be performed using MP safe mechanisms.
+
+  @param[in]  Value          A pointer to the 64-bit value for the compare exchange
+                             operation.
+  @param[in]  CompareValue   64-bit value used in compare operation.
+  @param[in]  ExchangeValue  64-bit value used in exchange operation.
+
+  @return The original *Value before exchange.
+
+**/
+UINT64
+EFIAPI
+InternalSyncCompareExchange64 (
+  IN volatile UINT64  *Value,
+  IN UINT64           CompareValue,
+  IN UINT64           ExchangeValue
+  )
+{
+  UINT64  RetValue;
+
+  RetValue = AsmInternalSyncCompareExchange64 (
+               Value,
+               CompareValue,
+               ExchangeValue
+               );
+
+  return RetValue;
+}
+
+/**
+  Performs an atomic increment of a 32-bit unsigned integer.
+
+  Performs an atomic increment of the 32-bit unsigned integer specified by
+  Value and returns the incremented value. The increment operation must be
+  performed using MP safe mechanisms. The state of the return value is not
+  guaranteed to be MP safe.
+
+  @param[in]  Value  A pointer to the 32-bit value to increment.
+
+  @return The incremented value.
+
+**/
+UINT32
+EFIAPI
+InternalSyncIncrement (
+  IN volatile UINT32  *Value
+  )
+{
+  return AsmInternalSyncIncrement (Value);
+}
+
+/**
+  Performs an atomic decrement of a 32-bit unsigned integer.
+
+  Performs an atomic decrement of the 32-bit unsigned integer specified by
+  Value and returns the decremented value. The decrement operation must be
+  performed using MP safe mechanisms. The state of the return value is not
+  guaranteed to be MP safe.
+
+  @param[in]  Value  A pointer to the 32-bit value to decrement.
+
+  @return The decremented value.
+
+**/
+UINT32
+EFIAPI
+InternalSyncDecrement (
+  IN volatile UINT32  *Value
+  )
+{
+  return AsmInternalSyncDecrement (Value);
+}
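
One more note on the increment/decrement return value, matching the "not
guaranteed to be MP safe" wording above: amadd.w produces the pre-add value,
but the helpers re-read memory afterward, so the number returned may already
include updates from other processors. A portable stand-in that instead
returns exactly old + 1 (hedged sketch; EmulatedSyncIncrement is a
hypothetical name, and the GCC builtin only models the amadd.w behavior):

  #include <stdint.h>

  static uint32_t
  EmulatedSyncIncrement (
    volatile uint32_t  *Value
    )
  {
    /* Atomic read-modify-write; returns the value just after this add. */
    return __sync_add_and_fetch (Value, 1);
  }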

--
2.27.0