diff --git a/crypto/chacha/asm/chacha-armv4.pl b/crypto/chacha/asm/chacha-armv4.pl
index 0b1592d6b9..0604cebdc8 100755
--- a/crypto/chacha/asm/chacha-armv4.pl
+++ b/crypto/chacha/asm/chacha-armv4.pl
@@ -212,7 +212,7 @@ sub ROUND {
 .LChaCha20_ctr32:
 	ldr	r12,[sp,#0]		@ pull pointer to counter and nonce
 	stmdb	sp!,{r0-r2,r4-r11,lr}
-#if __ARM_ARCH__<7 && !defined(__thumb2__)
+#if __ARM_ARCH<7 && !defined(__thumb2__)
 	sub	r14,pc,#16		@ ChaCha20_ctr32
 #else
 	adr	r14,.LChaCha20_ctr32
@@ -294,8 +294,8 @@ sub ROUND {
 	ldr	@t[0],[sp,#4*(0)]	@ load key material
 	ldr	@t[1],[sp,#4*(1)]
 
-#if __ARM_ARCH__>=6 || !defined(__ARMEB__)
-# if __ARM_ARCH__<7
+#if __ARM_ARCH>=6 || !defined(__ARMEB__)
+# if __ARM_ARCH<7
 	orr	@t[2],r12,r14
 	tst	@t[2],#3		@ are input and output aligned?
 	ldr	@t[2],[sp,#4*(2)]
@@ -321,7 +321,7 @@ sub ROUND {
 # endif
 	ldrhs	@t[2],[r12,#-8]
 	ldrhs	@t[3],[r12,#-4]
-# if __ARM_ARCH__>=6 && defined(__ARMEB__)
+# if __ARM_ARCH>=6 && defined(__ARMEB__)
 	rev	@x[0],@x[0]
 	rev	@x[1],@x[1]
 	rev	@x[2],@x[2]
@@ -358,7 +358,7 @@ sub ROUND {
 # endif
 	ldrhs	@t[2],[r12,#-8]
 	ldrhs	@t[3],[r12,#-4]
-# if __ARM_ARCH__>=6 && defined(__ARMEB__)
+# if __ARM_ARCH>=6 && defined(__ARMEB__)
 	rev	@x[4],@x[4]
 	rev	@x[5],@x[5]
 	rev	@x[6],@x[6]
@@ -403,7 +403,7 @@ sub ROUND {
 # endif
 	ldrhs	@t[2],[r12,#-8]
 	ldrhs	@t[3],[r12,#-4]
-# if __ARM_ARCH__>=6 && defined(__ARMEB__)
+# if __ARM_ARCH>=6 && defined(__ARMEB__)
 	rev	@x[0],@x[0]
 	rev	@x[1],@x[1]
 	rev	@x[2],@x[2]
@@ -445,7 +445,7 @@ sub ROUND {
 # endif
 	ldrhs	@t[2],[r12,#-8]
 	ldrhs	@t[3],[r12,#-4]
-# if __ARM_ARCH__>=6 && defined(__ARMEB__)
+# if __ARM_ARCH>=6 && defined(__ARMEB__)
 	rev	@x[4],@x[4]
 	rev	@x[5],@x[5]
 	rev	@x[6],@x[6]
@@ -476,7 +476,7 @@ sub ROUND {
 	bhi	.Loop_outer
 
 	beq	.Ldone
-# if __ARM_ARCH__<7
+# if __ARM_ARCH<7
 	b	.Ltail
 
 .align	4
@@ -484,7 +484,7 @@ sub ROUND {
 	cmp	@t[3],#64		@ restore flags
 # endif
 #endif
-#if __ARM_ARCH__<7
+#if __ARM_ARCH<7
 	ldr	@t[3],[sp,#4*(3)]
 ___
 for ($i=0;$i<16;$i+=4) {
diff --git a/crypto/fipsmodule/aes/asm/bsaes-armv7.pl b/crypto/fipsmodule/aes/asm/bsaes-armv7.pl
index a1f0085e91..ff665a83d4 100644
--- a/crypto/fipsmodule/aes/asm/bsaes-armv7.pl
+++ b/crypto/fipsmodule/aes/asm/bsaes-armv7.pl
@@ -717,7 +717,6 @@ sub bitslice {
 # define VFP_ABI_POP
 # define VFP_ABI_FRAME	0
 # define BSAES_ASM_EXTENDED_KEY
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
 # define __ARM_MAX_ARCH__ 7
 #endif
 
diff --git a/crypto/fipsmodule/bn/asm/armv4-mont.pl b/crypto/fipsmodule/bn/asm/armv4-mont.pl
index c145f857de..1e72af6692 100644
--- a/crypto/fipsmodule/bn/asm/armv4-mont.pl
+++ b/crypto/fipsmodule/bn/asm/armv4-mont.pl
@@ -287,7 +287,7 @@
 	add	sp,sp,#2*4		@ skip over {r0,r2}
 	mov	r0,#1
 .Labrt:
-#if __ARM_ARCH__>=5
+#if __ARM_ARCH>=5
 	ret				@ bx lr
 #else
 	tst	lr,#1
diff --git a/crypto/fipsmodule/sha/asm/sha256-armv4.pl b/crypto/fipsmodule/sha/asm/sha256-armv4.pl
index 82ed50219a..845e1527b3 100644
--- a/crypto/fipsmodule/sha/asm/sha256-armv4.pl
+++ b/crypto/fipsmodule/sha/asm/sha256-armv4.pl
@@ -86,7 +86,7 @@ sub BODY_00_15 {
 my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
 
 $code.=<<___ if ($i<16);
-#if __ARM_ARCH__>=7
+#if __ARM_ARCH>=7
 	@ ldr	$t1,[$inp],#4			@ $i
 # if $i==15
 	str	$inp,[sp,#17*4]			@ make room for $t4
@@ -129,7 +129,7 @@ sub BODY_00_15 {
 	cmp	$t2,#0xf2			@ done?
 #endif
 #if $i<15
-# if __ARM_ARCH__>=7
+# if __ARM_ARCH>=7
 	ldr	$t1,[$inp],#4			@ prefetch
 # else
 	ldrb	$t1,[$inp,#3]
@@ -179,7 +179,7 @@ sub BODY_16_XX {
 #ifndef __KERNEL__
 # include <ring-core/arm_arch.h>
 #else
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_ARCH __LINUX_ARM_ARCH__
 # define __ARM_MAX_ARCH__ 7
 #endif
 
@@ -229,7 +229,7 @@ sub BODY_16_XX {
 .type	sha256_block_data_order,%function
 sha256_block_data_order:
 .Lsha256_block_data_order:
-#if __ARM_ARCH__<7 && !defined(__thumb2__)
+#if __ARM_ARCH<7 && !defined(__thumb2__)
 	sub	r3,pc,#8		@ sha256_block_data_order
 #else
 	adr	r3,.Lsha256_block_data_order
@@ -251,7 +251,7 @@ sub BODY_16_XX {
 	sub	$Ktbl,r3,#256+32	@ K256
 	sub	sp,sp,#16*4		@ alloca(X[16])
 .Loop:
-# if __ARM_ARCH__>=7
+# if __ARM_ARCH>=7
 	ldr	$t1,[$inp],#4
 # else
 	ldrb	$t1,[$inp,#3]
@@ -263,7 +263,7 @@ sub BODY_16_XX {
 $code.=".Lrounds_16_xx:\n";
 for (;$i<32;$i++)	{ &BODY_16_XX($i,@V); unshift(@V,pop(@V)); }
 $code.=<<___;
-#if __ARM_ARCH__>=7
+#if __ARM_ARCH>=7
 	ite	eq			@ Thumb2 thing, sanity check in ARM
 #endif
 	ldreq	$t3,[sp,#16*4]		@ pull ctx
@@ -294,7 +294,7 @@ sub BODY_16_XX {
 	bne	.Loop
 
 	add	sp,sp,#`16+3`*4	@ destroy frame
-#if __ARM_ARCH__>=5
+#if __ARM_ARCH>=5
 	ldmia	sp!,{r4-r11,pc}
 #else
 	ldmia	sp!,{r4-r11,lr}
diff --git a/crypto/fipsmodule/sha/asm/sha512-armv4.pl b/crypto/fipsmodule/sha/asm/sha512-armv4.pl
index 1f4ec286ec..1822fb6934 100644
--- a/crypto/fipsmodule/sha/asm/sha512-armv4.pl
+++ b/crypto/fipsmodule/sha/asm/sha512-armv4.pl
@@ -159,7 +159,7 @@ ()
 	teq	$t0,#$magic
 
 	ldr	$t3,[sp,#$Coff+0]	@ c.lo
-#if __ARM_ARCH__>=7
+#if __ARM_ARCH>=7
 	it	eq			@ Thumb2 thing, sanity check in ARM
 #endif
 	orreq	$Ktbl,$Ktbl,#1
@@ -204,7 +204,6 @@ ()
 # define VFP_ABI_PUSH	vstmdb	sp!,{d8-d15}
 # define VFP_ABI_POP	vldmia	sp!,{d8-d15}
 #else
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
 # define __ARM_MAX_ARCH__ 7
 # define VFP_ABI_PUSH
 # define VFP_ABI_POP
@@ -291,7 +290,7 @@ ()
 .type	sha512_block_data_order,%function
 sha512_block_data_order:
 .Lsha512_block_data_order:
-#if __ARM_ARCH__<7 && !defined(__thumb2__)
+#if __ARM_ARCH<7 && !defined(__thumb2__)
 	sub	r3,pc,#8		@ sha512_block_data_order
 #else
 	adr	r3,.Lsha512_block_data_order
@@ -341,7 +340,7 @@ ()
 	str	$Thi,[sp,#$Foff+4]
 
 .L00_15:
-#if __ARM_ARCH__<7
+#if __ARM_ARCH<7
 	ldrb	$Tlo,[$inp,#7]
 	ldrb	$t0, [$inp,#6]
 	ldrb	$t1, [$inp,#5]
@@ -419,7 +418,7 @@ ()
 ___
 &BODY_00_15(0x17);
 $code.=<<___;
-#if __ARM_ARCH__>=7
+#if __ARM_ARCH>=7
 	ittt	eq			@ Thumb2 thing, sanity check in ARM
 #endif
 	ldreq	$t0,[sp,#`$Xoff+8*(16-1)`+0]
@@ -498,7 +497,7 @@ ()
 	bne	.Loop
 
 	add	sp,sp,#8*9		@ destroy frame
-#if __ARM_ARCH__>=5
+#if __ARM_ARCH>=5
 	ldmia	sp!,{r4-r12,pc}
 #else
 	ldmia	sp!,{r4-r12,lr}
diff --git a/include/ring-core/arm_arch.h b/include/ring-core/arm_arch.h
index 2fc0fc0421..d01d6fb73d 100644
--- a/include/ring-core/arm_arch.h
+++ b/include/ring-core/arm_arch.h
@@ -73,27 +73,6 @@
 // ARMV8_SHA512 indicates support for hardware SHA-512 instructions.
 #define ARMV8_SHA512 (1 << 6)
 
-#if defined(__ASSEMBLER__)
-
-// We require the ARM assembler provide |__ARM_ARCH| from Arm C Language
-// Extensions (ACLE). This is supported in GCC 4.8+ and Clang 3.2+. MSVC does
-// not implement ACLE, but we require Clang's assembler on Windows.
-#if !defined(__ARM_ARCH)
-#error "ARM assembler must define __ARM_ARCH"
-#endif
-
-// __ARM_ARCH__ is used by OpenSSL assembly to determine the minimum target ARM
-// version.
-//
-// TODO(davidben): Switch the assembly to use |__ARM_ARCH| directly.
-#define __ARM_ARCH__ __ARM_ARCH
-
-// Even when building for 32-bit ARM, support for aarch64 crypto instructions
-// will be included.
-#define __ARM_MAX_ARCH__ 8
-
-#endif  // __ASSEMBLER__
-
 #endif  // ARM || AARCH64
 
 #endif  // OPENSSL_HEADER_ARM_ARCH_H
diff --git a/include/ring-core/asm_base.h b/include/ring-core/asm_base.h
index 1d8cf5fb8b..30588727d3 100644
--- a/include/ring-core/asm_base.h
+++ b/include/ring-core/asm_base.h
@@ -73,14 +73,13 @@
 #error "ARM assembler must define __ARM_ARCH"
 #endif
 
-// __ARM_ARCH__ is used by OpenSSL assembly to determine the minimum target ARM
-// version.
-//
-// TODO(davidben): Switch the assembly to use |__ARM_ARCH| directly.
-#define __ARM_ARCH__ __ARM_ARCH
-
 // Even when building for 32-bit ARM, support for aarch64 crypto instructions
 // will be included.
+//
+// TODO(davidben): Remove this and the corresponding ifdefs? This is only
+// defined because some OpenSSL assembly files would allow disabling the NEON
+// code entirely. I think we'd prefer to do that by lifting the dispatch to C
+// anyway.
 #define __ARM_MAX_ARCH__ 8
 
 // Support macros for
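
Illustrative note, not part of the patch: every hunk above switches the 32-bit ARM version gates to the ACLE macro __ARM_ARCH, which asm_base.h asserts the assembler provides, in place of the removed __ARM_ARCH__ shim. A hypothetical GAS fragment showing the resulting pattern; example_fn and the exact instruction choices are invented for illustration, loosely mirroring the entry and return sequences visible in the chacha-armv4.pl and armv4-mont.pl hunks:

@ Hypothetical sketch (not from the ring sources): a hand-written .S file
@ keying directly off the ACLE __ARM_ARCH macro after this change.
.text
.globl	example_fn
.type	example_fn,%function
example_fn:
#if __ARM_ARCH<7 && !defined(__thumb2__)
	sub	r3,pc,#8		@ pre-v7 ARM mode: derive own address from pc
#else
	adr	r3,example_fn		@ ARMv7+/Thumb-2: pc-relative adr is available
#endif
#if __ARM_ARCH>=5
	bx	lr			@ ARMv5+: interworking return
#else
	tst	lr,#1			@ ARMv4: plain return unless caller was Thumb
	moveq	pc,lr
	bx	lr
#endif
.size	example_fn,.-example_fn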