From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
To: edk2-devel@lists.01.org, leif.lindholm@linaro.org, jbrasen@codeaurora.org, feng.tian@intel.com, star.zeng@intel.com, daniil.egranov@arm.com
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Date: Wed, 17 Aug 2016 16:59:04 +0200
Message-Id: <1471445945-19239-4-git-send-email-ard.biesheuvel@linaro.org>
In-Reply-To: <1471445945-19239-1-git-send-email-ard.biesheuvel@linaro.org>
References: <1471445945-19239-1-git-send-email-ard.biesheuvel@linaro.org>
Subject: [PATCH v3 3/4] MdeModulePkg/EbcDxe AARCH64: use tail call for EBC to native thunk

Instead of pessimistically copying at least 64 bytes from the VM stack
to the native stack, and popping off the register arguments again before
making the native call, avoid touching the native stack entirely when the
VM stack frame is 64 bytes or smaller.

Also, if the stack frame does exceed 64 bytes, there is no need to copy
the first 64 bytes, since we are passing those in registers anyway.
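[Editorial note, not part of the patch] As a rough model of the rule described
above, the C sketch below computes how many bytes of the VM frame still have to
be copied once the first 64 bytes travel in x0-x7. The helper names are
invented, and it assumes the downward-growing EBC stack keeps FramePtr above
NewStackPointer and that the frame consists of 8-byte slots.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical model only -- the real thunk is AArch64 assembly.        */

    /* Size of the EBC VM argument frame, assuming the EBC stack grows down  */
    /* so FramePtr is the higher address.                                    */
    static uint64_t
    VmFrameBytes (uint64_t NewStackPointer, uint64_t FramePtr)
    {
      return FramePtr - NewStackPointer;
    }

    /* Bytes that still have to be copied to the native stack: the first     */
    /* 64 bytes (arguments 1..8) are passed in registers x0-x7, so only the  */
    /* excess needs to hit memory.                                           */
    static uint64_t
    BytesCopiedToNativeStack (uint64_t FrameBytes)
    {
      return (FrameBytes <= 64) ? 0 : FrameBytes - 64;
    }

    int
    main (void)
    {
      /* 48-byte frame: six register arguments, tail-call path, no copy.     */
      printf ("%llu\n", (unsigned long long)
              BytesCopiedToNativeStack (VmFrameBytes (0x1000, 0x1030)));  /* 0  */

      /* 96-byte frame: eight register arguments plus 32 stacked bytes.      */
      printf ("%llu\n", (unsigned long long)
              BytesCopiedToNativeStack (VmFrameBytes (0x1000, 0x1060)));  /* 32 */

      return 0;
    }

In the first case the patched thunk never touches the native stack pointer at
all; previously both cases paid for a CopyMem of the whole frame plus a 64-byte
pad and four ldp pops.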
Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
 MdeModulePkg/Universal/EbcDxe/AArch64/EbcLowLevel.S | 73 +++++++++++++++-----
 1 file changed, 55 insertions(+), 18 deletions(-)

diff --git a/MdeModulePkg/Universal/EbcDxe/AArch64/EbcLowLevel.S b/MdeModulePkg/Universal/EbcDxe/AArch64/EbcLowLevel.S
index cb7a70b5a4f8..d95713e82b0f 100644
--- a/MdeModulePkg/Universal/EbcDxe/AArch64/EbcLowLevel.S
+++ b/MdeModulePkg/Universal/EbcDxe/AArch64/EbcLowLevel.S
@@ -35,30 +35,67 @@ ASM_GLOBAL ASM_PFX(mEbcInstructionBufferTemplate)
 //****************************************************************************
 // UINTN EbcLLCALLEXNative(UINTN FuncAddr, UINTN NewStackPointer, VOID *FramePtr)
 ASM_PFX(EbcLLCALLEXNative):
-    stp  x19, x20, [sp, #-16]!
-    stp  x29, x30, [sp, #-16]!
+    mov  x8, x0                     // Preserve x0
+    mov  x9, x1                     // Preserve x1
 
-    mov  x19, x0
-    mov  x20, sp
-    sub  x2, x2, x1                 // Length = NewStackPointer-FramePtr
-    sub  sp, sp, x2
-    sub  sp, sp, #64                // Make sure there is room for at least 8 args in the new stack
-    mov  x0, sp
+    //
+    // If the EBC stack frame is smaller than or equal to 64 bytes, we know there
+    // are no stacked arguments #9 and beyond that we need to copy to the native
+    // stack. In this case, we can perform a tail call which is much more
+    // efficient, since there is no need to touch the native stack at all.
+    //
+    sub  x3, x2, x1                 // Length = NewStackPointer - FramePtr
+    cmp  x3, #64
+    b.gt 1f
 
-    bl   CopyMem                    // Sp, NewStackPointer, Length
+    adr  x0, 0f
+    sub  x0, x0, x3, lsr #1
+    br   x0
 
-    ldp  x0, x1, [sp], #16
-    ldp  x2, x3, [sp], #16
-    ldp  x4, x5, [sp], #16
-    ldp  x6, x7, [sp], #16
+    ldr  x7, [x9, #56]
+    ldr  x6, [x9, #48]
+    ldr  x5, [x9, #40]
+    ldr  x4, [x9, #32]
+    ldr  x3, [x9, #24]
+    ldr  x2, [x9, #16]
+    ldr  x1, [x9, #8]
+    ldr  x0, [x9]
 
-    blr  x19
+0:  br   x8
 
-    mov  sp, x20
-    ldp  x29, x30, [sp], #16
-    ldp  x19, x20, [sp], #16
+    //
+    // More than 64 bytes: we need to build the full native stack frame and copy
+    // the part of the VM stack exceeding 64 bytes (which may contain stacked
+    // arguments) to the native stack
+    //
+1:  stp  x29, x30, [sp, #-16]!
+    mov  x29, sp
 
-    ret
+    //
+    // Ensure that the stack pointer remains 16 byte aligned,
+    // even if the size of the VM stack frame is not a multiple of 16
+    //
+    add  x1, x1, #64                // Skip over [potential] reg params
+    tbz  x3, #3, 2f                 // Multiple of 16?
+    ldr  x4, [x2, #-8]!             // No? Then push one word
+    str  x4, [sp, #-16]!            // ... but use two slots
+    b    3f
+
+2:  ldp  x4, x5, [x2, #-16]!
+    stp  x4, x5, [sp, #-16]!
+3:  cmp  x2, x1
+    b.gt 2b
+
+    ldp  x0, x1, [x9]
+    ldp  x2, x3, [x9, #16]
+    ldp  x4, x5, [x9, #32]
+    ldp  x6, x7, [x9, #48]
+
+    blr  x8
+
+    mov  sp, x29
+    ldp  x29, x30, [sp], #16
+    ret
 
 //****************************************************************************
 // EbcLLEbcInterpret
-- 
2.7.4
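[Editorial note, not part of the patch] Two details of the >64-byte and
tail-call paths are easy to model in C: the copied argument area is reserved
in 16-byte units so sp stays 16-byte aligned as AAPCS64 requires (an odd
trailing 8-byte word still consumes a full 16-byte slot), and the small-frame
path enters the descending ldr sequence Length/2 bytes before label 0, since
each 8-byte argument is loaded by one 4-byte instruction. The helpers below
are hypothetical names, assume an 8-byte-granular frame size, and do not count
the fp/lr save slot.

    #include <stdint.h>
    #include <stdio.h>

    /* Native stack bytes reserved for stacked arguments on the >64-byte    */
    /* path: the excess over the eight register slots, rounded up to 16 so  */
    /* that sp stays 16-byte aligned (fp/lr save not counted).              */
    static uint64_t
    NativeArgAreaBytes (uint64_t FrameBytes)
    {
      uint64_t Excess;

      if (FrameBytes <= 64) {
        return 0;                       /* tail-call path: sp untouched     */
      }
      Excess = FrameBytes - 64;
      return (Excess + 15) & ~(uint64_t)15;
    }

    /* On the tail-call path the thunk branches to (label 0f) minus this    */
    /* many bytes: one 4-byte ldr per 8-byte argument, i.e. FrameBytes / 2. */
    static uint64_t
    LdrEntryBackOffset (uint64_t FrameBytes)
    {
      return FrameBytes / 2;
    }

    int
    main (void)
    {
      printf ("%llu\n", (unsigned long long)NativeArgAreaBytes (72));   /* 16 */
      printf ("%llu\n", (unsigned long long)NativeArgAreaBytes (96));   /* 32 */
      /* 24-byte frame: enter 12 bytes (3 instructions) before label 0,     */
      /* skipping 5 of the 8 ldrs so only x2, x1, x0 are loaded.            */
      printf ("%llu\n", (unsigned long long)LdrEntryBackOffset (24));   /* 12 */
      return 0;
    }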