public inbox for devel@edk2.groups.io
* [PATCH v2] BaseSynchronizationLib: Fix LoongArch64 synchronization functions
@ 2023-04-28  4:20 Dongyan Qian
  2023-04-28  4:24 ` [edk2-devel] " Chao Li
  0 siblings, 1 reply; 2+ messages in thread
From: Dongyan Qian @ 2023-04-28  4:20 UTC (permalink / raw)
  To: devel; +Cc: Michael D Kinney, Liming Gao, Zhiguang Liu, Chao Li

REF: https://bugzilla.tianocore.org/show_bug.cgi?id=4432

Fix a return value bug:
the sc.w/sc.d instruction overwrites its source register with the
store-conditional status, destroying the old value that ll.w/ll.d
loaded into t0 as the return value.  Use t1 for the status so t0 is
preserved.
Adjust the pointer alignment check in InternalSyncCompareExchange16
to use sizeof (UINT16) instead of the size of the pointer.
Simplify AsmInternalSyncIncrement and AsmInternalSyncDecrement by
using amadd.w directly on the caller's pointer, dropping the
redundant dbar and intermediate loads.

Cc: Michael D Kinney <michael.d.kinney@intel.com>
Cc: Liming Gao <gaoliming@byosoft.com.cn>
Cc: Zhiguang Liu <zhiguang.liu@intel.com>
Cc: Chao Li <lichao@loongson.cn>
Signed-off-by: Dongyan Qian <qiandongyan@loongson.cn>
---
 .../LoongArch64/AsmSynchronization.S          | 30 ++++++++-----------
 .../LoongArch64/Synchronization.c             |  2 +-
 2 files changed, 13 insertions(+), 19 deletions(-)
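
Note (illustration only, not part of the patch): a rough, non-atomic C
sketch of the compare-exchange semantics the LL/SC loops implement;
the function and variable names below are made up for the example.
The value loaded by ll.w/ll.d lives in $t0 and is what the function
must return, so the sc.w/sc.d status has to go to a separate
register, $t1:

  #include <stdint.h>

  /* Return the old value; store ExchangeValue only if the old value
     equals CompareValue.  Comments map each step to the LoongArch64
     instructions in the patch.  Not atomic - illustration only. */
  static uint32_t
  CompareExchange32Sketch (
    volatile uint32_t  *Ptr,            /* $a0 */
    uint32_t           CompareValue,    /* $a1 */
    uint32_t           ExchangeValue    /* $a2 */
    )
  {
    uint32_t  Old;

    Old = *Ptr;                    /* ll.w  $t0, $a0, 0x0                  */
    if (Old == CompareValue) {     /* bne   $t0, $a1, 2f                   */
      *Ptr = ExchangeValue;        /* sc.w  $t1, $a0, 0x0 (status in $t1)  */
    }                              /* beqz  $t1, 1b retries a failed store */

    return Old;                    /* $t0 must still hold the loaded value */
  }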

diff --git a/MdePkg/Library/BaseSynchronizationLib/LoongArch64/AsmSynchronization.S b/MdePkg/Library/BaseSynchronizationLib/LoongArch64/AsmSynchronization.S
index fdd50c54b5..03865bf2c9 100644
--- a/MdePkg/Library/BaseSynchronizationLib/LoongArch64/AsmSynchronization.S
+++ b/MdePkg/Library/BaseSynchronizationLib/LoongArch64/AsmSynchronization.S
@@ -53,9 +53,9 @@ ASM_PFX(AsmInternalSyncCompareExchange32):
 1:
   ll.w  $t0, $a0, 0x0
   bne   $t0, $a1, 2f
-  move  $t0, $a2
-  sc.w  $t0, $a0, 0x0
-  beqz  $t0, 1b
+  move  $t1, $a2
+  sc.w  $t1, $a0, 0x0
+  beqz  $t1, 1b
   b     3f
 2:
   dbar  0
@@ -76,9 +76,9 @@ ASM_PFX(AsmInternalSyncCompareExchange64):
 1:
   ll.d  $t0, $a0, 0x0
   bne   $t0, $a1, 2f
-  move  $t0, $a2
-  sc.d  $t0, $a0, 0x0
-  beqz  $t0, 1b
+  move  $t1, $a2
+  sc.d  $t1, $a0, 0x0
+  beqz  $t1, 1b
   b     3f
 2:
   dbar  0
@@ -94,13 +94,10 @@ AsmInternalSyncIncrement (
   )
 **/
 ASM_PFX(AsmInternalSyncIncrement):
-  move     $t0, $a0
-  dbar     0
-  ld.w     $t1, $t0, 0x0
-  li.w     $t2, 1
-  amadd.w  $t1, $t2, $t0
+  li.w     $t0, 1
+  amadd.w  $zero, $t0, $a0
 
-  ld.w     $a0, $t0, 0x0
+  ld.w     $a0, $a0, 0
   jirl     $zero, $ra, 0
 
 /**
@@ -111,12 +108,9 @@ AsmInternalSyncDecrement (
   )
 **/
 ASM_PFX(AsmInternalSyncDecrement):
-  move     $t0, $a0
-  dbar     0
-  ld.w     $t1, $t0, 0x0
-  li.w     $t2, -1
-  amadd.w  $t1, $t2, $t0
+  li.w     $t0, -1
+  amadd.w  $zero, $t0, $a0
 
-  ld.w     $a0, $t0, 0x0
+  ld.w     $a0, $a0, 0
   jirl     $zero, $ra, 0
 .end
diff --git a/MdePkg/Library/BaseSynchronizationLib/LoongArch64/Synchronization.c b/MdePkg/Library/BaseSynchronizationLib/LoongArch64/Synchronization.c
index d696c8ce10..6baf841c9b 100644
--- a/MdePkg/Library/BaseSynchronizationLib/LoongArch64/Synchronization.c
+++ b/MdePkg/Library/BaseSynchronizationLib/LoongArch64/Synchronization.c
@@ -81,7 +81,7 @@ InternalSyncCompareExchange16 (
   volatile UINT32  *Ptr32;
 
   /* Check that ptr is naturally aligned */
-  ASSERT (!((UINT64)Value & (sizeof (Value) - 1)));
+  ASSERT (!((UINT64)Value & (sizeof (UINT16) - 1)));
 
   /* Mask inputs to the correct size. */
   Mask               = (((~0UL) - (1UL << (0)) + 1) & (~0UL >> (64 - 1 - ((sizeof (UINT16) * 8) - 1))));
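
For the increment/decrement rework, a rough C equivalent of the new
sequence (illustration only, not part of the patch; it assumes the
GCC/Clang __atomic builtins and uses a made-up function name):

  #include <stdint.h>

  /* One atomic add with the old value discarded (amadd.w with $zero
     as the destination), followed by a plain reload of the current
     value for the return, matching the new assembly. */
  static uint32_t
  SyncIncrementSketch (
    volatile uint32_t  *Value    /* $a0 */
    )
  {
    (void)__atomic_fetch_add (Value, 1, __ATOMIC_SEQ_CST);  /* amadd.w $zero, $t0, $a0 */
    return *Value;                                          /* ld.w    $a0, $a0, 0     */
  }
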
-- 
2.27.0


^ permalink raw reply related	[flat|nested] 2+ messages in thread

* Re: [edk2-devel] [PATCH v2] BaseSynchronizationLib: Fix LoongArch64 synchronization functions
  2023-04-28  4:20 [PATCH v2] BaseSynchronizationLib: Fix LoongArch64 synchronization functions Dongyan Qian
@ 2023-04-28  4:24 ` Chao Li
  0 siblings, 0 replies; 2+ messages in thread
From: Chao Li @ 2023-04-28  4:24 UTC (permalink / raw)
  To: devel, qiandongyan; +Cc: Michael D Kinney, Liming Gao, Zhiguang Liu

Reviewed-by: Chao Li <lichao@loongson.cn>


Thanks,
Chao

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2023-04-28  4:24 UTC | newest]

Thread overview: 2+ messages
2023-04-28  4:20 [PATCH v2] BaseSynchronizationLib: Fix LoongArch64 synchronization functions Dongyan Qian
2023-04-28  4:24 ` [edk2-devel] " Chao Li
