public inbox for devel@edk2.groups.io
* [PATCH v2 28/34] MdePkg/BaseSynchronizationLib: LoongArch cache related code.
@ 2022-09-14  9:41 Chao Li
  2022-09-23  7:21 ` Chao Li
  2022-09-23 15:57 ` Michael D Kinney
  0 siblings, 2 replies; 4+ messages in thread
From: Chao Li @ 2022-09-14  9:41 UTC (permalink / raw)
  To: devel; +Cc: Michael D Kinney, Liming Gao, Zhiguang Liu, Baoqi Zhang

REF: https://bugzilla.tianocore.org/show_bug.cgi?id=4053

Support LoongArch cache related functions.

Cc: Michael D Kinney <michael.d.kinney@intel.com>
Cc: Liming Gao <gaoliming@byosoft.com.cn>
Cc: Zhiguang Liu <zhiguang.liu@intel.com>

Signed-off-by: Chao Li <lichao@loongson.cn>
Co-authored-by: Baoqi Zhang <zhangbaoqi@loongson.cn>
---
 .../BaseSynchronizationLib.inf                |   5 +
 .../LoongArch64/Synchronization.c             | 246 ++++++++++++++++++
 2 files changed, 251 insertions(+)
 create mode 100644 MdePkg/Library/BaseSynchronizationLib/LoongArch64/Synchronization.c

diff --git a/MdePkg/Library/BaseSynchronizationLib/BaseSynchronizationLib.inf b/MdePkg/Library/BaseSynchronizationLib/BaseSynchronizationLib.inf
index 02ba12961a..10021f3352 100755
--- a/MdePkg/Library/BaseSynchronizationLib/BaseSynchronizationLib.inf
+++ b/MdePkg/Library/BaseSynchronizationLib/BaseSynchronizationLib.inf
@@ -4,6 +4,7 @@
 #  Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
 #  Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
 #  Copyright (c) 2020, Hewlett Packard Enterprise Development LP. All rights reserved.<BR>
+#  Copyright (c) 2022, Loongson Technology Corporation Limited. All rights reserved.<BR>
 #
 #  SPDX-License-Identifier: BSD-2-Clause-Patent
 #
@@ -82,6 +83,10 @@
   Synchronization.c
   RiscV64/Synchronization.S
 
+[Sources.LOONGARCH64]
+  Synchronization.c
+  LoongArch64/Synchronization.c
+
 [Packages]
   MdePkg/MdePkg.dec
 
diff --git a/MdePkg/Library/BaseSynchronizationLib/LoongArch64/Synchronization.c b/MdePkg/Library/BaseSynchronizationLib/LoongArch64/Synchronization.c
new file mode 100644
index 0000000000..b7789f3212
--- /dev/null
+++ b/MdePkg/Library/BaseSynchronizationLib/LoongArch64/Synchronization.c
@@ -0,0 +1,246 @@
+/** @file
+  LoongArch synchronization functions.
+
+  Copyright (c) 2022, Loongson Technology Corporation Limited. All rights reserved.<BR>
+
+  SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Library/DebugLib.h>
+
+/**
+  Performs an atomic compare exchange operation on a 16-bit
+  unsigned integer.
+
+  Performs an atomic compare exchange operation on the 16-bit
+  unsigned integer specified by Value.  If Value is equal to
+  CompareValue, then Value is set to ExchangeValue and
+  CompareValue is returned.  If Value is not equal to
+  CompareValue, then Value is returned. The compare exchange
+  operation must be performed using MP safe mechanisms.
+
+  @param[in]  Value         A pointer to the 16-bit value for the
+                        compare exchange operation.
+  @param[in]  CompareValue  16-bit value used in compare operation.
+  @param[in]  ExchangeValue 16-bit value used in exchange operation.
+
+  @return The original *Value before exchange.
+
+**/
+UINT16
+EFIAPI
+InternalSyncCompareExchange16 (
+  IN      volatile UINT16  *Value,
+  IN      UINT16           CompareValue,
+  IN      UINT16           ExchangeValue
+  )
+{
+  UINT32           RetValue;
+  UINT32           Temp;
+  UINT32           Shift;
+  UINT64           Mask;
+  UINT64           LocalCompareValue;
+  UINT64           LocalExchangeValue;
+  volatile UINT32  *Ptr32;
+
+  /* Check that ptr is naturally aligned */
+  ASSERT (!((UINT64)Value & (sizeof (Value) - 1)));
+
+  /* Mask inputs to the correct size. */
+  Mask               = (((~0UL) - (1UL << (0)) + 1) & (~0UL >> (64 - 1 - ((sizeof (UINT16) * 8) - 1))));
+  LocalCompareValue  = ((UINT64)CompareValue) & Mask;
+  LocalExchangeValue = ((UINT64)ExchangeValue) & Mask;
+
+  /*
+   * Calculate a shift & mask that correspond to the value we wish to
+   * compare & exchange within the naturally aligned 4 byte integer
+   * that includes it.
+   */
+  Shift                = (UINT64)Value & 0x3;
+  Shift               *= 8; /* BITS_PER_BYTE */
+  LocalCompareValue  <<= Shift;
+  LocalExchangeValue <<= Shift;
+  Mask               <<= Shift;
+
+  /*
+   * Calculate a pointer to the naturally aligned 4 byte integer that
+   * includes our byte of interest, and load its value.
+   */
+  Ptr32 = (UINT32 *)((UINT64)Value & ~0x3);
+
+  __asm__ __volatile__ (
+    "1:               \n"
+    "ll.w  %0, %3     \n"
+    "and   %1, %0, %4 \n"
+    "bne   %1, %5, 2f \n"
+    "andn  %1, %0, %4 \n"
+    "or    %1, %1, %6 \n"
+    "sc.w  %1, %2     \n"
+    "beqz  %1, 1b     \n"
+    "b     3f         \n"
+    "2:               \n"
+    "dbar  0          \n"
+    "3:               \n"
+    : "=&r" (RetValue), "=&r" (Temp), "=" "ZC" (*Ptr32)
+    : "ZC" (*Ptr32), "Jr" (Mask), "Jr" (LocalCompareValue), "Jr" (LocalExchangeValue)
+    : "memory"
+  );
+
+  return (RetValue & Mask) >> Shift;
+}
+
+/**
+  Performs an atomic compare exchange operation on a 32-bit
+  unsigned integer.
+
+  Performs an atomic compare exchange operation on the 32-bit
+  unsigned integer specified by Value.  If Value is equal to
+  CompareValue, then Value is set to ExchangeValue and
+  CompareValue is returned.  If Value is not equal to
+  CompareValue, then Value is returned. The compare exchange
+  operation must be performed using MP safe mechanisms.
+
+  @param[in]  Value         A pointer to the 32-bit value for the
+                        compare exchange operation.
+  @param[in]  CompareValue  32-bit value used in compare operation.
+  @param[in]  ExchangeValue 32-bit value used in exchange operation.
+
+  @return The original *Value before exchange.
+
+**/
+UINT32
+EFIAPI
+InternalSyncCompareExchange32 (
+  IN      volatile UINT32  *Value,
+  IN      UINT32           CompareValue,
+  IN      UINT32           ExchangeValue
+  )
+{
+  UINT32  RetValue;
+
+  __asm__ __volatile__ (
+    "1:              \n"
+    "ll.w %0, %2     \n"
+    "bne  %0, %3, 2f \n"
+    "move %0, %4     \n"
+    "sc.w %0, %1     \n"
+    "beqz %0, 1b     \n"
+    "b    3f         \n"
+    "2:              \n"
+    "dbar 0          \n"
+    "3:              \n"
+    : "=&r" (RetValue), "=" "ZC" (*Value)
+    : "ZC" (*Value), "Jr" (CompareValue), "Jr" (ExchangeValue)
+    : "memory"
+  );
+  return RetValue;
+}
+
+/**
+  Performs an atomic compare exchange operation on a 64-bit unsigned integer.
+
+  Performs an atomic compare exchange operation on the 64-bit unsigned integer specified
+  by Value.  If Value is equal to CompareValue, then Value is set to ExchangeValue and
+  CompareValue is returned.  If Value is not equal to CompareValue, then Value is returned.
+  The compare exchange operation must be performed using MP safe mechanisms.
+
+  @param[in]  Value         A pointer to the 64-bit value for the compare exchange
+                        operation.
+  @param[in]  CompareValue  64-bit value used in compare operation.
+  @param[in]  ExchangeValue 64-bit value used in exchange operation.
+
+  @return The original *Value before exchange.
+
+**/
+UINT64
+EFIAPI
+InternalSyncCompareExchange64 (
+  IN      volatile UINT64  *Value,
+  IN      UINT64           CompareValue,
+  IN      UINT64           ExchangeValue
+  )
+{
+  UINT64  RetValue;
+
+  __asm__ __volatile__ (
+    "1:              \n"
+    "ll.d %0, %2     \n"
+    "bne  %0, %3, 2f \n"
+    "move %0, %4     \n"
+    "sc.d %0, %1     \n"
+    "beqz %0, 1b     \n"
+    "b    3f         \n"
+    "2:              \n"
+    "dbar 0          \n"
+    "3:              \n"
+    : "=&r" (RetValue), "=" "ZC" (*Value)
+    : "ZC" (*Value), "Jr" (CompareValue), "Jr" (ExchangeValue)
+    : "memory"
+  );
+  return RetValue;
+}
+
+/**
+  Performs an atomic increment of an 32-bit unsigned integer.
+
+  Performs an atomic increment of the 32-bit unsigned integer specified by
+  Value and returns the incremented value. The increment operation must be
+  performed using MP safe mechanisms. The state of the return value is not
+  guaranteed to be MP safe.
+
+  @param[in]  Value A pointer to the 32-bit value to increment.
+
+  @return The incremented value.
+
+**/
+UINT32
+EFIAPI
+InternalSyncIncrement (
+  IN      volatile UINT32  *Value
+  )
+{
+  UINT32  Temp;
+
+  Temp = *Value;
+  __asm__ __volatile__ (
+     "dbar    0          \n"
+     "amadd.w %1, %2, %0 \n"
+     : "+ZB" (*Value), "=&r" (Temp)
+     : "r" (1)
+     : "memory"
+  );
+  return *Value;
+}
+
+/**
+  Performs an atomic decrement of an 32-bit unsigned integer.
+
+  Performs an atomic decrement of the 32-bit unsigned integer specified by
+  Value and returns the decrement value. The decrement operation must be
+  performed using MP safe mechanisms. The state of the return value is not
+  guaranteed to be MP safe.
+
+  @param[in]  Value A pointer to the 32-bit value to decrement.
+
+  @return The decrement value.
+
+**/
+UINT32
+EFIAPI
+InternalSyncDecrement (
+  IN      volatile UINT32  *Value
+  )
+{
+  UINT32  Temp;
+
+  Temp = *Value;
+  __asm__ __volatile__ (
+     "dbar    0          \n"
+     "amadd.w %1, %2, %0 \n"
+     : "+ZB" (*Value), "=&r" (Temp)
+     : "r" (-1)
+     : "memory"
+  );
+  return *Value;
+}
-- 
2.27.0
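
For readers skimming the 16-bit path above: the ll.w/sc.w sequence implements a compare-exchange on the naturally aligned 32-bit word that contains the 16-bit operand. Below is a hedged, portable C model of that word-folding idea; the helper name is hypothetical and it is built on the library's public InterlockedCompareExchange32 rather than on LoongArch instructions, so it illustrates the logic only, not the patch's actual implementation.

  #include <Base.h>
  #include <Library/SynchronizationLib.h>

  UINT16
  EFIAPI
  EmulatedCompareExchange16 (
    IN OUT volatile UINT16  *Value,
    IN     UINT16           CompareValue,
    IN     UINT16           ExchangeValue
    )
  {
    volatile UINT32  *Ptr32;
    UINT32           Shift;
    UINT32           Mask;
    UINT32           OldWord;
    UINT32           NewWord;

    //
    // Fold the 16-bit operand into the naturally aligned 32-bit word that
    // contains it, then retry the exchange on the whole word.
    //
    Ptr32 = (volatile UINT32 *)((UINTN)Value & ~(UINTN)0x3);
    Shift = (UINT32)(((UINTN)Value & 0x3) * 8);
    Mask  = (UINT32)MAX_UINT16 << Shift;

    for ( ; ; ) {
      OldWord = *Ptr32;
      if (((OldWord & Mask) >> Shift) != CompareValue) {
        break;    // Mismatch: report the value currently stored.
      }
      NewWord = (OldWord & ~Mask) | ((UINT32)ExchangeValue << Shift);
      if (InterlockedCompareExchange32 (Ptr32, OldWord, NewWord) == OldWord) {
        break;    // The containing word was swapped atomically.
      }
    }

    return (UINT16)((OldWord & Mask) >> Shift);
  }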


^ permalink raw reply related	[flat|nested] 4+ messages in thread
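
The Internal* routines in this patch back MdePkg's public SynchronizationLib interfaces (InterlockedIncrement, InterlockedDecrement, InterlockedCompareExchange16/32/64). A short usage sketch of those public calls follows; the reference count and lock variables are hypothetical, and a real driver would more likely use the library's spin-lock helpers.

  #include <Base.h>
  #include <Library/SynchronizationLib.h>

  STATIC volatile UINT32  mRefCount = 0;
  STATIC volatile UINT32  mLock     = 0;   // 0 = free, 1 = held (hypothetical)

  VOID
  EFIAPI
  ExampleCriticalSection (
    VOID
    )
  {
    //
    // Backed by InternalSyncIncrement on LoongArch64.
    //
    InterlockedIncrement (&mRefCount);

    //
    // Spin until the compare-exchange observes the lock free (0) and
    // atomically claims it (1); backed by InternalSyncCompareExchange32.
    //
    while (InterlockedCompareExchange32 (&mLock, 0, 1) != 0) {
    }

    // ... critical section ...

    InterlockedCompareExchange32 (&mLock, 1, 0);   // Release the lock.
    InterlockedDecrement (&mRefCount);
  }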

* Re: [PATCH v2 28/34] MdePkg/BaseSynchronizationLib: LoongArch cache related code.
  2022-09-14  9:41 [PATCH v2 28/34] MdePkg/BaseSynchronizationLib: LoongArch cache related code Chao Li
@ 2022-09-23  7:21 ` Chao Li
  2022-09-23 15:57 ` Michael D Kinney
  1 sibling, 0 replies; 4+ messages in thread
From: Chao Li @ 2022-09-23  7:21 UTC (permalink / raw)
  To: Michael D Kinney, Liming Gao, Zhiguang Liu, Baoqi Zhang
  Cc: devel@edk2.groups.io


Hi Mike, Liming and Zhiguang,
This patch has not been reviewed yet; could you please review it?

Thanks,
Chao
--------

On September 14, 2022, at 5:41 PM, Chao Li <lichao@loongson.cn> wrote:
> REF: https://bugzilla.tianocore.org/show_bug.cgi?id=4053
>
> Support LoongArch cache related functions.
> [...]



^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH v2 28/34] MdePkg/BaseSynchronizationLib: LoongArch cache related code.
  2022-09-14  9:41 [PATCH v2 28/34] MdePkg/BaseSynchronizationLib: LoongArch cache related code Chao Li
  2022-09-23  7:21 ` Chao Li
@ 2022-09-23 15:57 ` Michael D Kinney
  2022-09-24  1:57   ` Chao Li
  1 sibling, 1 reply; 4+ messages in thread
From: Michael D Kinney @ 2022-09-23 15:57 UTC (permalink / raw)
  To: Chao Li, devel@edk2.groups.io, Kinney, Michael D
  Cc: Gao, Liming, Liu, Zhiguang, Baoqi Zhang

Comments below.

Recommend moving inline assembly to .S files too.

> -----Original Message-----
> From: Chao Li <lichao@loongson.cn>
> Sent: Wednesday, September 14, 2022 2:42 AM
> To: devel@edk2.groups.io
> Cc: Kinney, Michael D <michael.d.kinney@intel.com>; Gao, Liming <gaoliming@byosoft.com.cn>; Liu, Zhiguang <zhiguang.liu@intel.com>;
> Baoqi Zhang <zhangbaoqi@loongson.cn>
> Subject: [PATCH v2 28/34] MdePkg/BaseSynchronizationLib: LoongArch cache related code.
> 
> REF: https://bugzilla.tianocore.org/show_bug.cgi?id=4053
> 
> Support LoongArch cache related functions.
> [...]
> +  /* Check that ptr is naturally aligned */
> 
> +  ASSERT (!((UINT64)Value & (sizeof (Value) - 1)));

Recommend using macros in Base.h to check alignment.
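
A minimal sketch of that suggestion, assuming the IS_ALIGNED macro from Base.h. Note that the operand is a UINT16, so 2-byte alignment is what actually needs to be checked; the posted expression uses sizeof (Value), which is the size of the pointer:

  /* Check that the 16-bit operand is naturally (2-byte) aligned. */
  ASSERT (IS_ALIGNED ((UINTN)Value, sizeof (UINT16)));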
> 
> +
> 
> +  /* Mask inputs to the correct size. */
> 
> +  Mask               = (((~0UL) - (1UL << (0)) + 1) & (~0UL >> (64 - 1 - ((sizeof (UINT16) * 8) - 1))));

This statement is hard to read, especially with ~0 and << 0. Base.h has MAX_UINT64, which may be better than ~0.
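
For comparison, the whole expression reduces to the low 16 bits, so one readable equivalent using the MAX_UINT16 constant from Base.h would be:

  /* Mask inputs to the width of the 16-bit operand. */
  Mask = MAX_UINT16;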

> [...]
> +  __asm__ __volatile__ (
> 
> +    "1:               \n"
> 
> +    "ll.w  %0, %3     \n"
> 
> +    "and   %1, %0, %4 \n"
> 
> +    "bne   %1, %5, 2f \n"
> 
> +    "andn  %1, %0, %4 \n"
> 
> +    "or    %1, %1, %6 \n"
> 
> +    "sc.w  %1, %2     \n"
> 
> +    "beqz  %1, 1b     \n"
> 
> +    "b     3f         \n"
> 
> +    "2:               \n"
> 
> +    "dbar  0          \n"
> 
> +    "3:               \n"
> 
> +    : "=&r" (RetValue), "=&r" (Temp), "=" "ZC" (*Ptr32)
> 
> +    : "ZC" (*Ptr32), "Jr" (Mask), "Jr" (LocalCompareValue), "Jr" (LocalExchangeValue)
> 
> +    : "memory"
> 
> +  );

Recommend removing inline assembly
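
The recommendation here (and in the opening comment) is to move the sequence into a .S file, as the RiscV64 port does. A different way to drop the inline assembly, shown only as a hedged sketch and not as what the reviewer asks for, is to let the compiler emit the LL/SC loop via the GCC/Clang __atomic built-ins; the function name below is hypothetical:

  UINT32
  EFIAPI
  CompareExchange32Builtin (
    IN OUT volatile UINT32  *Value,
    IN     UINT32           CompareValue,
    IN     UINT32           ExchangeValue
    )
  {
    UINT32  Expected;

    Expected = CompareValue;
    //
    // Strong compare-exchange: on failure Expected is overwritten with the
    // value currently stored, so it holds the original *Value either way.
    //
    __atomic_compare_exchange_n (
      Value,
      &Expected,
      ExchangeValue,
      0,                  // strong, not weak
      __ATOMIC_SEQ_CST,
      __ATOMIC_SEQ_CST
      );
    return Expected;
  }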

> [...]
> +  __asm__ __volatile__ (
> 
> +    "1:              \n"
> 
> +    "ll.w %0, %2     \n"
> 
> +    "bne  %0, %3, 2f \n"
> 
> +    "move %0, %4     \n"
> 
> +    "sc.w %0, %1     \n"
> 
> +    "beqz %0, 1b     \n"
> 
> +    "b    3f         \n"
> 
> +    "2:              \n"
> 
> +    "dbar 0          \n"
> 
> +    "3:              \n"
> 
> +    : "=&r" (RetValue), "=" "ZC" (*Value)
> 
> +    : "ZC" (*Value), "Jr" (CompareValue), "Jr" (ExchangeValue)
> 
> +    : "memory"
> 
> +  );

Recommend removing inline assembly

> [...]


^ permalink raw reply	[flat|nested] 4+ messages in thread

* Re: [PATCH v2 28/34] MdePkg/BaseSynchronizationLib: LoongArch cache related code.
  2022-09-23 15:57 ` Michael D Kinney
@ 2022-09-24  1:57   ` Chao Li
  0 siblings, 0 replies; 4+ messages in thread
From: Chao Li @ 2022-09-24  1:57 UTC (permalink / raw)
  To: "Kinney, Michael D"
  Cc: "devel@edk2.groups.io", "Gao, Liming",
	"Liu, Zhiguang", Baoqi Zhang


Mike,
Okay, I'll finish this today and reflect the changes in v3.

Thanks,
Chao
--------

On September 23, 2022, at 11:57 PM, "Kinney, Michael D" <michael.d.kinney@intel.com> wrote:
> Comments below.
>
> Recommend moving inline assembly to .S files too.
> [...]
>



^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2022-09-24  1:57 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-09-14  9:41 [PATCH v2 28/34] MdePkg/BaseSynchronizationLib: LoongArch cache related code Chao Li
2022-09-23  7:21 ` Chao Li
2022-09-23 15:57 ` Michael D Kinney
2022-09-24  1:57   ` Chao Li

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox