From: "Ard Biesheuvel" <ard.biesheuvel@arm.com>
To: devel@edk2.groups.io
Cc: glin@suse.com, leif@nuviainc.com, lersek@redhat.com, liming.gao@intel.com, Ard Biesheuvel
Subject: [PATCH v2] ArmPkg/CompilerIntrinsicsLib: provide atomics intrinsics
Date: Wed, 20 May 2020 13:44:48 +0200
Message-Id: <20200520114448.26104-1-ard.biesheuvel@arm.com>
X-Mailer: git-send-email 2.17.1

Gary reports that GCC 10 will emit calls to atomics intrinsic routines
unless -mno-outline-atomics is specified. This means GCC 10 introduces
new intrinsics, and even though it would be possible to work around this
by specifying the command line option, doing so would require a new GCC10
toolchain profile to be created, which we prefer to avoid.

So instead, add the new intrinsics to our library so they are provided
when necessary.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@arm.com>
---
v2:
- add missing .globl to export the functions from the object file
- add function end markers so the size of each is visible in the ELF
  metadata
- add some comments to describe what is going on

 ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf |   3 +
 ArmPkg/Library/CompilerIntrinsicsLib/AArch64/Atomics.S         | 142 ++++++++++++++++++++
 2 files changed, 145 insertions(+)

diff --git a/ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf b/ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf
index d5bad9467758..fcf48c678119 100644
--- a/ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf
+++ b/ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf
@@ -79,6 +79,9 @@ [Sources.ARM]
   Arm/ldivmod.asm    | MSFT
   Arm/llsr.asm       | MSFT
 
+[Sources.AARCH64]
+  AArch64/Atomics.S  | GCC
+
 [Packages]
   MdePkg/MdePkg.dec
   ArmPkg/ArmPkg.dec
diff --git a/ArmPkg/Library/CompilerIntrinsicsLib/AArch64/Atomics.S b/ArmPkg/Library/CompilerIntrinsicsLib/AArch64/Atomics.S
new file mode 100644
index 000000000000..dc61d6bb8e52
--- /dev/null
+++ b/ArmPkg/Library/CompilerIntrinsicsLib/AArch64/Atomics.S
@@ -0,0 +1,142 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2020, Arm, Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+#------------------------------------------------------------------------------
+
+  /*
+   * Provide the GCC intrinsics that are required when using GCC 9 or
+   * later with the -moutline-atomics option (which became the default
+   * in GCC 10)
+   */
+  .arch       armv8-a
+
+  .macro      reg_alias, pfx, sz
+  r0_\sz      .req    \pfx\()0
+  r1_\sz      .req    \pfx\()1
+  tmp0_\sz    .req    \pfx\()16
+  tmp1_\sz    .req    \pfx\()17
+  .endm
+
+  /*
+   * Define register aliases of the right type for each size
+   * (xN for 8 bytes, wN for everything smaller)
+   */
+  reg_alias   w, 1
+  reg_alias   w, 2
+  reg_alias   w, 4
+  reg_alias   x, 8
+
+  .macro      fn_start, name:req
+  .section    .text.\name
+  .globl      \name
+  .type       \name, %function
+\name\():
+  .endm
+
+  .macro      fn_end, name:req
+  .size       \name, . - \name
+  .endm
+
+  /*
+   * Emit an atomic helper for \model with operands of size \sz, using
+   * the operation specified by \insn (which is the LSE name), and which
+   * can be implemented using the generic load-locked/store-conditional
+   * (LL/SC) sequence below, using the arithmetic operation given by
+   * \opc.
+   */
+  .macro      emit_ld_sz, sz:req, insn:req, opc:req, model:req, s, a, l
+  fn_start    __aarch64_\insn\()\sz\()\model
+  mov         tmp0_\sz, r0_\sz
+0:  ld\a\()xr\s   r0_\sz, [x1]
+  .ifnc       \insn, swp
+  \opc        tmp1_\sz, r0_\sz, tmp0_\sz
+  .else
+  \opc        tmp1_\sz, tmp0_\sz
+  .endif
+  st\l\()xr\s w15, tmp1_\sz, [x1]
+  cbnz        w15, 0b
+  ret
+  fn_end      __aarch64_\insn\()\sz\()\model
+  .endm
+
+  /*
+   * Emit atomic helpers for \model for operand sizes in the
+   * set {1, 2, 4, 8}, for the instruction pattern given by
+   * \insn. (This is the LSE name, but this implementation uses
+   * the generic LL/SC sequence using \opc as the arithmetic
+   * operation on the target.)
+   */
+  .macro      emit_ld, insn:req, opc:req, model:req, a, l
+  emit_ld_sz  1, \insn, \opc, \model, b, \a, \l
+  emit_ld_sz  2, \insn, \opc, \model, h, \a, \l
+  emit_ld_sz  4, \insn, \opc, \model,  , \a, \l
+  emit_ld_sz  8, \insn, \opc, \model,  , \a, \l
+  .endm
+
+  /*
+   * Emit the compare and swap helper for \model and size \sz
+   * using LL/SC instructions.
+   */
+  .macro      emit_cas_sz, sz:req, model:req, uxt:req, s, a, l
+  fn_start    __aarch64_cas\sz\()\model
+  \uxt        tmp0_\sz, r0_\sz
+0:  ld\a\()xr\s   r0_\sz, [x2]
+  cmp         r0_\sz, tmp0_\sz
+  bne         1f
+  st\l\()xr\s w15, r1_\sz, [x2]
+  cbnz        w15, 0b
+1:  ret
+  fn_end      __aarch64_cas\sz\()\model
+  .endm
+
+  /*
+   * Emit compare-and-swap helpers for \model for operand sizes in the
+   * set {1, 2, 4, 8, 16}.
+   */
+  .macro      emit_cas, model:req, a, l
+  emit_cas_sz 1, \model, uxtb, b, \a, \l
+  emit_cas_sz 2, \model, uxth, h, \a, \l
+  emit_cas_sz 4, \model, mov , , \a, \l
+  emit_cas_sz 8, \model, mov , , \a, \l
+
+  /*
+   * We cannot use the parameterized sequence for 16 byte CAS, so we
+   * need to define it explicitly.
+   */
+  fn_start    __aarch64_cas16\model
+  mov         x16, x0
+  mov         x17, x1
+0:  ld\a\()xp     x0, x1, [x4]
+  cmp         x0, x16
+  ccmp        x1, x17, #0, eq
+  bne         1f
+  st\l\()xp   w15, x2, x3, [x4]
+  cbnz        w15, 0b
+1:  ret
+  fn_end      __aarch64_cas16\model
+  .endm
+
+  /*
+   * Emit the set of GCC outline atomic helper functions for
+   * the memory ordering model given by \model:
+   * - relax: unordered loads and stores
+   * - acq: load-acquire, unordered store
+   * - rel: unordered load, store-release
+   * - acq_rel: load-acquire, store-release
+   */
+  .macro      emit_model, model:req, a, l
+  emit_ld     ldadd, add, \model, \a, \l
+  emit_ld     ldclr, bic, \model, \a, \l
+  emit_ld     ldeor, eor, \model, \a, \l
+  emit_ld     ldset, orr, \model, \a, \l
+  emit_ld     swp, mov, \model, \a, \l
+  emit_cas    \model, \a, \l
+  .endm
+
+  emit_model  _relax
+  emit_model  _acq, a
+  emit_model  _rel,, l
+  emit_model  _acq_rel, a, l
-- 
2.17.1
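
Note (illustrative, not part of the patch): the helpers above implement the
functions that GCC's outline-atomics code generation calls, so they are pulled
in by ordinary C atomic built-ins. Below is a minimal sketch of the kind of
source involved, assuming a GCC 10 toolchain targeting AArch64 with outline
atomics left at its default (i.e. without -mno-outline-atomics); the file and
function names are made up for illustration:

  /* atomics_example.c -- hypothetical example, not part of the patch */
  #include <stdint.h>

  uint32_t  counter;

  uint32_t
  increment_counter (void)
  {
    /*
     * With outline atomics enabled, GCC 10 is expected to compile this
     * built-in into a call to __aarch64_ldadd4_acq_rel, one of the helpers
     * provided by Atomics.S above, rather than an inline LL/SC or LSE
     * sequence.
     */
    return __atomic_fetch_add (&counter, 1, __ATOMIC_SEQ_CST);
  }

Whether such a call is actually emitted (as opposed to an inline LSE
instruction) depends on the architecture flags passed to the compiler;
providing the helpers in this library covers the case where it is.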