author    David Benjamin <davidben@google.com>  2016-12-06 18:25:50 -0500
committer David Benjamin <davidben@google.com>  2016-12-08 16:34:47 -0500
commit    1b249678059ecd918235790a7a0471771cc4e5ce (patch)
tree      20c2249ef44fdc535da6b6eb0e43e00d437d4d40 /linux-arm
parent    909b19f027eb0af12513f4d5589efdd67e34bd91 (diff)
download  boringssl-1b249678059ecd918235790a7a0471771cc4e5ce.tar.gz
external/boringssl: Sync to 7c5728649affe20e2952b11a0aeaf0e7b114aad9.
This includes the following changes:

  https://boringssl.googlesource.com/boringssl/+log/68f37b7a3f451aa1ca8c93669c024d01f6270ae8..7c5728649affe20e2952b11a0aeaf0e7b114aad9

This also removes sha256-armv4.S from libcrypto_sources_no_clang; clang
can assemble it now. The other files still need to be there, though.

Note this pulls in a fix to a wpa_supplicant regression introduced in
c895d6b1c580258e72e1ed3fcc86d38970ded9e1.

Test: make checkbuild
Test: cts-tradefed run cts -m CtsLibcoreTestCases -a arm64-v8a
Test: cts-tradefed run cts -m CtsLibcoreOkHttpTestCases -a arm64-v8a
Change-Id: Ife1d9ea1c87a0b7b1814b8e3590d6f1eaf721629
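For context: "clang can assemble it now" refers to the diff below dropping the
adrl pseudo-instruction (and its "# define adrl adr" workaround), which clang's
integrated assembler does not implement, in favor of plain adr with the K256
table kept within reach. The file itself implements sha256_block_data_order,
the compression function behind BoringSSL's public SHA-256 API. A minimal
caller, shown purely for illustration and not part of this change:

  /* Illustrative only: hashes "abc" with BoringSSL's one-shot SHA256().
   * The assembly patched below supplies the underlying block function. */
  #include <stdio.h>
  #include <string.h>
  #include <openssl/sha.h>

  int main(void) {
    static const char kMsg[] = "abc";
    uint8_t digest[SHA256_DIGEST_LENGTH];
    SHA256((const uint8_t *)kMsg, strlen(kMsg), digest);
    for (size_t i = 0; i < sizeof(digest); i++) {
      printf("%02x", digest[i]);
    }
    printf("\n");  /* expect ba7816bf...f20015ad for "abc" */
    return 0;
  }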
Diffstat (limited to 'linux-arm')
-rw-r--r--  linux-arm/crypto/sha/sha256-armv4.S  36
1 file changed, 17 insertions(+), 19 deletions(-)
diff --git a/linux-arm/crypto/sha/sha256-armv4.S b/linux-arm/crypto/sha/sha256-armv4.S
index 60400413..f37fd7c7 100644
--- a/linux-arm/crypto/sha/sha256-armv4.S
+++ b/linux-arm/crypto/sha/sha256-armv4.S
@@ -1,4 +1,11 @@
#if defined(__arm__)
+@ Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
+@
+@ Licensed under the OpenSSL license (the "License"). You may not use
+@ this file except in compliance with the License. You can obtain a copy
+@ in the file LICENSE in the source distribution or at
+@ https://www.openssl.org/source/license.html
+
@ ====================================================================
@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
@@ -45,16 +52,11 @@
#endif
.text
-#if __ARM_ARCH__<7
-.code 32
-#else
+#if defined(__thumb2__)
.syntax unified
-# if defined(__thumb2__) && !defined(__APPLE__)
-# define adrl adr
.thumb
-# else
+#else
.code 32
-# endif
#endif
.type K256,%object
@@ -89,10 +91,10 @@ K256:
.type sha256_block_data_order,%function
sha256_block_data_order:
.Lsha256_block_data_order:
-#if __ARM_ARCH__<7
+#if __ARM_ARCH__<7 && !defined(__thumb2__)
sub r3,pc,#8 @ sha256_block_data_order
#else
- adr r3,sha256_block_data_order
+ adr r3,.Lsha256_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
ldr r12,.LOPENSSL_armcap
@@ -1878,13 +1880,14 @@ sha256_block_data_order:
.globl sha256_block_data_order_neon
.hidden sha256_block_data_order_neon
.type sha256_block_data_order_neon,%function
-.align 4
+.align 5
+.skip 16
sha256_block_data_order_neon:
.LNEON:
stmdb sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
sub r11,sp,#16*4+16
- adrl r14,K256
+ adr r14,K256
bic r11,r11,#15 @ align for 128-bit stores
mov r12,sp
mov sp,r11 @ alloca
@@ -2660,7 +2663,7 @@ sha256_block_data_order_neon:
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
-# if defined(__thumb2__) && !defined(__APPLE__)
+# if defined(__thumb2__)
# define INST(a,b,c,d) .byte c,d|0xc,a,b
# else
# define INST(a,b,c,d) .byte a,b,c,d
@@ -2671,16 +2674,11 @@ sha256_block_data_order_neon:
sha256_block_data_order_armv8:
.LARMv8:
vld1.32 {q0,q1},[r0]
-# ifdef __APPLE__
sub r3,r3,#256+32
-# elif defined(__thumb2__)
- adr r3,.LARMv8
- sub r3,r3,#.LARMv8-K256
-# else
- adrl r3,K256
-# endif
add r2,r1,r2,lsl#6 @ len to point at the end of inp
+ b .Loop_v8
+.align 4
.Loop_v8:
vld1.8 {q8,q9},[r1]!
vld1.8 {q10,q11},[r1]!
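
A note on the .LOPENSSL_armcap reference visible in the hunks above: at run
time the assembly loads BoringSSL's ARM capability word to choose between the
integer, NEON, and ARMv8 code paths. C callers can observe the same state; a
minimal probe, assuming a 32-bit ARM build against the <openssl/cpu.h> of this
era (illustrative, not part of this change):

  /* Illustrative probe of the capability bit the assembly dispatches on.
   * CRYPTO_is_NEON_capable() reflects the same OPENSSL_armcap state that
   * sha256_block_data_order checks before taking the NEON path. */
  #include <stdio.h>
  #include <openssl/cpu.h>

  int main(void) {
  #if defined(OPENSSL_ARM)
    printf("NEON: %s\n", CRYPTO_is_NEON_capable() ? "yes" : "no");
  #else
    printf("not a 32-bit ARM build; this dispatch does not apply\n");
  #endif
    return 0;
  }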