deps: enable ARM assembly for OpenSSL
author Fedor Indutny <fedor@indutny.com>
Fri, 22 Aug 2014 10:21:50 +0000 (14:21 +0400)
committer Fedor Indutny <fedor@indutny.com>
Tue, 26 Aug 2014 20:18:55 +0000 (00:18 +0400)
fix #8062

deps/openssl/asm/Makefile
deps/openssl/asm/arm-elf-gas/aes/aes-armv4.S [new file with mode: 0644]
deps/openssl/asm/arm-elf-gas/bn/armv4-gf2m.S [new file with mode: 0644]
deps/openssl/asm/arm-elf-gas/bn/armv4-mont.S [new file with mode: 0644]
deps/openssl/asm/arm-elf-gas/modes/ghash-armv4.S [new file with mode: 0644]
deps/openssl/asm/arm-elf-gas/sha/sha1-armv4-large.S [new file with mode: 0644]
deps/openssl/asm/arm-elf-gas/sha/sha256-armv4.S [new file with mode: 0644]
deps/openssl/asm/arm-elf-gas/sha/sha512-armv4.S [new file with mode: 0644]
deps/openssl/openssl.gyp

diff --git a/deps/openssl/asm/Makefile b/deps/openssl/asm/Makefile
index 2218859..19a36bb 100644
--- a/deps/openssl/asm/Makefile
+++ b/deps/openssl/asm/Makefile
@@ -42,6 +42,13 @@ OUTPUTS      = \
        x64-elf-gas/whrlpool/wp-x86_64.s \
        x64-elf-gas/modes/ghash-x86_64.s \
        x64-elf-gas/x86_64cpuid.s \
+       arm-elf-gas/aes/aes-armv4.S \
+       arm-elf-gas/bn/armv4-mont.S \
+       arm-elf-gas/bn/armv4-gf2m.S \
+       arm-elf-gas/sha/sha1-armv4-large.S \
+       arm-elf-gas/sha/sha256-armv4.S \
+       arm-elf-gas/sha/sha512-armv4.S \
+       arm-elf-gas/modes/ghash-armv4.S \
        x86-macosx-gas/aes/aes-586.s \
        x86-macosx-gas/aes/aesni-x86.s \
        x86-macosx-gas/aes/vpaes-x86.s \
@@ -121,7 +128,7 @@ OUTPUTS     = \
        x64-win32-masm/modes/ghash-x86_64.asm \
        x64-win32-masm/x86_64cpuid.asm \
 
-x64-elf-gas/%.s x86-elf-gas/%.s:
+arm-elf-gas/%.S x64-elf-gas/%.s x86-elf-gas/%.s:
        $(PERL) $< elf > $@
 
 x64-macosx-gas/%.s x86-macosx-gas/%.s:
@@ -262,3 +269,10 @@ x86-win32-masm/sha/sha512-586.asm: ../openssl/crypto/sha/asm/sha512-586.pl
 x86-win32-masm/whrlpool/wp-mmx.asm: ../openssl/crypto/whrlpool/asm/wp-mmx.pl
 x86-win32-masm/modes/ghash-x86.asm: ../openssl/crypto/modes/asm/ghash-x86.pl
 x86-win32-masm/x86cpuid.asm: ../openssl/crypto/x86cpuid.pl
+arm-elf-gas/aes/aes-armv4.S: ../openssl/crypto/aes/asm/aes-armv4.pl
+arm-elf-gas/bn/armv4-mont.S: ../openssl/crypto/bn/asm/armv4-mont.pl
+arm-elf-gas/bn/armv4-gf2m.S: ../openssl/crypto/bn/asm/armv4-gf2m.pl
+arm-elf-gas/sha/sha1-armv4-large.S: ../openssl/crypto/sha/asm/sha1-armv4-large.pl
+arm-elf-gas/sha/sha512-armv4.S: ../openssl/crypto/sha/asm/sha512-armv4.pl
+arm-elf-gas/sha/sha256-armv4.S: ../openssl/crypto/sha/asm/sha256-armv4.pl
+arm-elf-gas/modes/ghash-armv4.S: ../openssl/crypto/modes/asm/ghash-armv4.pl
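
Note: the arm-elf-gas/*.S files added below are the pre-generated output of the upstream CRYPTOGAMS perl scripts, checked in (presumably) so that building Node.js does not require perl. Assuming GNU make and perl are available, the pattern rule and dependencies added above should let them be regenerated roughly as follows (an illustrative sketch, not part of the patch):

    cd deps/openssl/asm
    make arm-elf-gas/aes/aes-armv4.S
    # equivalent to running the translator directly:
    # perl ../openssl/crypto/aes/asm/aes-armv4.pl elf > arm-elf-gas/aes/aes-armv4.S
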
diff --git a/deps/openssl/asm/arm-elf-gas/aes/aes-armv4.S b/deps/openssl/asm/arm-elf-gas/aes/aes-armv4.S
new file mode 100644
index 0000000..2697d4c
--- /dev/null
+++ b/deps/openssl/asm/arm-elf-gas/aes/aes-armv4.S
@@ -0,0 +1,1071 @@
+#include "arm_arch.h"
+.text
+.code  32
+
+.type  AES_Te,%object
+.align 5
+AES_Te:
+.word  0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d
+.word  0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554
+.word  0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d
+.word  0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a
+.word  0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87
+.word  0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b
+.word  0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea
+.word  0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b
+.word  0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a
+.word  0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f
+.word  0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108
+.word  0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f
+.word  0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e
+.word  0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5
+.word  0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d
+.word  0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f
+.word  0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e
+.word  0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb
+.word  0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce
+.word  0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497
+.word  0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c
+.word  0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed
+.word  0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b
+.word  0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a
+.word  0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16
+.word  0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594
+.word  0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81
+.word  0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3
+.word  0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a
+.word  0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504
+.word  0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163
+.word  0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d
+.word  0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f
+.word  0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739
+.word  0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47
+.word  0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395
+.word  0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f
+.word  0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883
+.word  0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c
+.word  0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76
+.word  0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e
+.word  0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4
+.word  0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6
+.word  0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b
+.word  0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7
+.word  0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0
+.word  0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25
+.word  0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818
+.word  0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72
+.word  0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651
+.word  0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21
+.word  0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85
+.word  0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa
+.word  0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12
+.word  0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0
+.word  0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9
+.word  0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133
+.word  0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7
+.word  0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920
+.word  0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a
+.word  0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17
+.word  0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8
+.word  0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11
+.word  0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a
+@ Te4[256]
+.byte  0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
+.byte  0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
+.byte  0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
+.byte  0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
+.byte  0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
+.byte  0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
+.byte  0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
+.byte  0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
+.byte  0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
+.byte  0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
+.byte  0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
+.byte  0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
+.byte  0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
+.byte  0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
+.byte  0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
+.byte  0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
+.byte  0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
+.byte  0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
+.byte  0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
+.byte  0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
+.byte  0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
+.byte  0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
+.byte  0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
+.byte  0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
+.byte  0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
+.byte  0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
+.byte  0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
+.byte  0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
+.byte  0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
+.byte  0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
+.byte  0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
+.byte  0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+@ rcon[]
+.word  0x01000000, 0x02000000, 0x04000000, 0x08000000
+.word  0x10000000, 0x20000000, 0x40000000, 0x80000000
+.word  0x1B000000, 0x36000000, 0, 0, 0, 0, 0, 0
+.size  AES_Te,.-AES_Te
+
+@ void AES_encrypt(const unsigned char *in, unsigned char *out,
+@               const AES_KEY *key) {
+.global AES_encrypt
+.type   AES_encrypt,%function
+.align 5
+AES_encrypt:
+       sub     r3,pc,#8                @ AES_encrypt
+       stmdb   sp!,{r1,r4-r12,lr}
+       mov     r12,r0          @ inp
+       mov     r11,r2
+       sub     r10,r3,#AES_encrypt-AES_Te      @ Te
+#if __ARM_ARCH__<7
+       ldrb    r0,[r12,#3]     @ load input data in endian-neutral
+       ldrb    r4,[r12,#2]     @ manner...
+       ldrb    r5,[r12,#1]
+       ldrb    r6,[r12,#0]
+       orr     r0,r0,r4,lsl#8
+       ldrb    r1,[r12,#7]
+       orr     r0,r0,r5,lsl#16
+       ldrb    r4,[r12,#6]
+       orr     r0,r0,r6,lsl#24
+       ldrb    r5,[r12,#5]
+       ldrb    r6,[r12,#4]
+       orr     r1,r1,r4,lsl#8
+       ldrb    r2,[r12,#11]
+       orr     r1,r1,r5,lsl#16
+       ldrb    r4,[r12,#10]
+       orr     r1,r1,r6,lsl#24
+       ldrb    r5,[r12,#9]
+       ldrb    r6,[r12,#8]
+       orr     r2,r2,r4,lsl#8
+       ldrb    r3,[r12,#15]
+       orr     r2,r2,r5,lsl#16
+       ldrb    r4,[r12,#14]
+       orr     r2,r2,r6,lsl#24
+       ldrb    r5,[r12,#13]
+       ldrb    r6,[r12,#12]
+       orr     r3,r3,r4,lsl#8
+       orr     r3,r3,r5,lsl#16
+       orr     r3,r3,r6,lsl#24
+#else
+       ldr     r0,[r12,#0]
+       ldr     r1,[r12,#4]
+       ldr     r2,[r12,#8]
+       ldr     r3,[r12,#12]
+#ifdef __ARMEL__
+       rev     r0,r0
+       rev     r1,r1
+       rev     r2,r2
+       rev     r3,r3
+#endif
+#endif
+       bl      _armv4_AES_encrypt
+
+       ldr     r12,[sp],#4             @ pop out
+#if __ARM_ARCH__>=7
+#ifdef __ARMEL__
+       rev     r0,r0
+       rev     r1,r1
+       rev     r2,r2
+       rev     r3,r3
+#endif
+       str     r0,[r12,#0]
+       str     r1,[r12,#4]
+       str     r2,[r12,#8]
+       str     r3,[r12,#12]
+#else
+       mov     r4,r0,lsr#24            @ write output in endian-neutral
+       mov     r5,r0,lsr#16            @ manner...
+       mov     r6,r0,lsr#8
+       strb    r4,[r12,#0]
+       strb    r5,[r12,#1]
+       mov     r4,r1,lsr#24
+       strb    r6,[r12,#2]
+       mov     r5,r1,lsr#16
+       strb    r0,[r12,#3]
+       mov     r6,r1,lsr#8
+       strb    r4,[r12,#4]
+       strb    r5,[r12,#5]
+       mov     r4,r2,lsr#24
+       strb    r6,[r12,#6]
+       mov     r5,r2,lsr#16
+       strb    r1,[r12,#7]
+       mov     r6,r2,lsr#8
+       strb    r4,[r12,#8]
+       strb    r5,[r12,#9]
+       mov     r4,r3,lsr#24
+       strb    r6,[r12,#10]
+       mov     r5,r3,lsr#16
+       strb    r2,[r12,#11]
+       mov     r6,r3,lsr#8
+       strb    r4,[r12,#12]
+       strb    r5,[r12,#13]
+       strb    r6,[r12,#14]
+       strb    r3,[r12,#15]
+#endif
+#if __ARM_ARCH__>=5
+       ldmia   sp!,{r4-r12,pc}
+#else
+       ldmia   sp!,{r4-r12,lr}
+       tst     lr,#1
+       moveq   pc,lr                   @ be binary compatible with V4, yet
+       .word   0xe12fff1e                      @ interoperable with Thumb ISA:-)
+#endif
+.size  AES_encrypt,.-AES_encrypt
+
+.type   _armv4_AES_encrypt,%function
+.align 2
+_armv4_AES_encrypt:
+       str     lr,[sp,#-4]!            @ push lr
+       ldmia   r11!,{r4-r7}
+       eor     r0,r0,r4
+       ldr     r12,[r11,#240-16]
+       eor     r1,r1,r5
+       eor     r2,r2,r6
+       eor     r3,r3,r7
+       sub     r12,r12,#1
+       mov     lr,#255
+
+       and     r7,lr,r0
+       and     r8,lr,r0,lsr#8
+       and     r9,lr,r0,lsr#16
+       mov     r0,r0,lsr#24
+.Lenc_loop:
+       ldr     r4,[r10,r7,lsl#2]       @ Te3[s0>>0]
+       and     r7,lr,r1,lsr#16 @ i0
+       ldr     r5,[r10,r8,lsl#2]       @ Te2[s0>>8]
+       and     r8,lr,r1
+       ldr     r6,[r10,r9,lsl#2]       @ Te1[s0>>16]
+       and     r9,lr,r1,lsr#8
+       ldr     r0,[r10,r0,lsl#2]       @ Te0[s0>>24]
+       mov     r1,r1,lsr#24
+
+       ldr     r7,[r10,r7,lsl#2]       @ Te1[s1>>16]
+       ldr     r8,[r10,r8,lsl#2]       @ Te3[s1>>0]
+       ldr     r9,[r10,r9,lsl#2]       @ Te2[s1>>8]
+       eor     r0,r0,r7,ror#8
+       ldr     r1,[r10,r1,lsl#2]       @ Te0[s1>>24]
+       and     r7,lr,r2,lsr#8  @ i0
+       eor     r5,r5,r8,ror#8
+       and     r8,lr,r2,lsr#16 @ i1
+       eor     r6,r6,r9,ror#8
+       and     r9,lr,r2
+       ldr     r7,[r10,r7,lsl#2]       @ Te2[s2>>8]
+       eor     r1,r1,r4,ror#24
+       ldr     r8,[r10,r8,lsl#2]       @ Te1[s2>>16]
+       mov     r2,r2,lsr#24
+
+       ldr     r9,[r10,r9,lsl#2]       @ Te3[s2>>0]
+       eor     r0,r0,r7,ror#16
+       ldr     r2,[r10,r2,lsl#2]       @ Te0[s2>>24]
+       and     r7,lr,r3                @ i0
+       eor     r1,r1,r8,ror#8
+       and     r8,lr,r3,lsr#8  @ i1
+       eor     r6,r6,r9,ror#16
+       and     r9,lr,r3,lsr#16 @ i2
+       ldr     r7,[r10,r7,lsl#2]       @ Te3[s3>>0]
+       eor     r2,r2,r5,ror#16
+       ldr     r8,[r10,r8,lsl#2]       @ Te2[s3>>8]
+       mov     r3,r3,lsr#24
+
+       ldr     r9,[r10,r9,lsl#2]       @ Te1[s3>>16]
+       eor     r0,r0,r7,ror#24
+       ldr     r7,[r11],#16
+       eor     r1,r1,r8,ror#16
+       ldr     r3,[r10,r3,lsl#2]       @ Te0[s3>>24]
+       eor     r2,r2,r9,ror#8
+       ldr     r4,[r11,#-12]
+       eor     r3,r3,r6,ror#8
+
+       ldr     r5,[r11,#-8]
+       eor     r0,r0,r7
+       ldr     r6,[r11,#-4]
+       and     r7,lr,r0
+       eor     r1,r1,r4
+       and     r8,lr,r0,lsr#8
+       eor     r2,r2,r5
+       and     r9,lr,r0,lsr#16
+       eor     r3,r3,r6
+       mov     r0,r0,lsr#24
+
+       subs    r12,r12,#1
+       bne     .Lenc_loop
+
+       add     r10,r10,#2
+
+       ldrb    r4,[r10,r7,lsl#2]       @ Te4[s0>>0]
+       and     r7,lr,r1,lsr#16 @ i0
+       ldrb    r5,[r10,r8,lsl#2]       @ Te4[s0>>8]
+       and     r8,lr,r1
+       ldrb    r6,[r10,r9,lsl#2]       @ Te4[s0>>16]
+       and     r9,lr,r1,lsr#8
+       ldrb    r0,[r10,r0,lsl#2]       @ Te4[s0>>24]
+       mov     r1,r1,lsr#24
+
+       ldrb    r7,[r10,r7,lsl#2]       @ Te4[s1>>16]
+       ldrb    r8,[r10,r8,lsl#2]       @ Te4[s1>>0]
+       ldrb    r9,[r10,r9,lsl#2]       @ Te4[s1>>8]
+       eor     r0,r7,r0,lsl#8
+       ldrb    r1,[r10,r1,lsl#2]       @ Te4[s1>>24]
+       and     r7,lr,r2,lsr#8  @ i0
+       eor     r5,r8,r5,lsl#8
+       and     r8,lr,r2,lsr#16 @ i1
+       eor     r6,r9,r6,lsl#8
+       and     r9,lr,r2
+       ldrb    r7,[r10,r7,lsl#2]       @ Te4[s2>>8]
+       eor     r1,r4,r1,lsl#24
+       ldrb    r8,[r10,r8,lsl#2]       @ Te4[s2>>16]
+       mov     r2,r2,lsr#24
+
+       ldrb    r9,[r10,r9,lsl#2]       @ Te4[s2>>0]
+       eor     r0,r7,r0,lsl#8
+       ldrb    r2,[r10,r2,lsl#2]       @ Te4[s2>>24]
+       and     r7,lr,r3                @ i0
+       eor     r1,r1,r8,lsl#16
+       and     r8,lr,r3,lsr#8  @ i1
+       eor     r6,r9,r6,lsl#8
+       and     r9,lr,r3,lsr#16 @ i2
+       ldrb    r7,[r10,r7,lsl#2]       @ Te4[s3>>0]
+       eor     r2,r5,r2,lsl#24
+       ldrb    r8,[r10,r8,lsl#2]       @ Te4[s3>>8]
+       mov     r3,r3,lsr#24
+
+       ldrb    r9,[r10,r9,lsl#2]       @ Te4[s3>>16]
+       eor     r0,r7,r0,lsl#8
+       ldr     r7,[r11,#0]
+       ldrb    r3,[r10,r3,lsl#2]       @ Te4[s3>>24]
+       eor     r1,r1,r8,lsl#8
+       ldr     r4,[r11,#4]
+       eor     r2,r2,r9,lsl#16
+       ldr     r5,[r11,#8]
+       eor     r3,r6,r3,lsl#24
+       ldr     r6,[r11,#12]
+
+       eor     r0,r0,r7
+       eor     r1,r1,r4
+       eor     r2,r2,r5
+       eor     r3,r3,r6
+
+       sub     r10,r10,#2
+       ldr     pc,[sp],#4              @ pop and return
+.size  _armv4_AES_encrypt,.-_armv4_AES_encrypt
+
+.global private_AES_set_encrypt_key
+.type   private_AES_set_encrypt_key,%function
+.align 5
+private_AES_set_encrypt_key:
+_armv4_AES_set_encrypt_key:
+       sub     r3,pc,#8                @ AES_set_encrypt_key
+       teq     r0,#0
+       moveq   r0,#-1
+       beq     .Labrt
+       teq     r2,#0
+       moveq   r0,#-1
+       beq     .Labrt
+
+       teq     r1,#128
+       beq     .Lok
+       teq     r1,#192
+       beq     .Lok
+       teq     r1,#256
+       movne   r0,#-1
+       bne     .Labrt
+
+.Lok:  stmdb   sp!,{r4-r12,lr}
+       sub     r10,r3,#_armv4_AES_set_encrypt_key-AES_Te-1024  @ Te4
+
+       mov     r12,r0          @ inp
+       mov     lr,r1                   @ bits
+       mov     r11,r2                  @ key
+
+#if __ARM_ARCH__<7
+       ldrb    r0,[r12,#3]     @ load input data in endian-neutral
+       ldrb    r4,[r12,#2]     @ manner...
+       ldrb    r5,[r12,#1]
+       ldrb    r6,[r12,#0]
+       orr     r0,r0,r4,lsl#8
+       ldrb    r1,[r12,#7]
+       orr     r0,r0,r5,lsl#16
+       ldrb    r4,[r12,#6]
+       orr     r0,r0,r6,lsl#24
+       ldrb    r5,[r12,#5]
+       ldrb    r6,[r12,#4]
+       orr     r1,r1,r4,lsl#8
+       ldrb    r2,[r12,#11]
+       orr     r1,r1,r5,lsl#16
+       ldrb    r4,[r12,#10]
+       orr     r1,r1,r6,lsl#24
+       ldrb    r5,[r12,#9]
+       ldrb    r6,[r12,#8]
+       orr     r2,r2,r4,lsl#8
+       ldrb    r3,[r12,#15]
+       orr     r2,r2,r5,lsl#16
+       ldrb    r4,[r12,#14]
+       orr     r2,r2,r6,lsl#24
+       ldrb    r5,[r12,#13]
+       ldrb    r6,[r12,#12]
+       orr     r3,r3,r4,lsl#8
+       str     r0,[r11],#16
+       orr     r3,r3,r5,lsl#16
+       str     r1,[r11,#-12]
+       orr     r3,r3,r6,lsl#24
+       str     r2,[r11,#-8]
+       str     r3,[r11,#-4]
+#else
+       ldr     r0,[r12,#0]
+       ldr     r1,[r12,#4]
+       ldr     r2,[r12,#8]
+       ldr     r3,[r12,#12]
+#ifdef __ARMEL__
+       rev     r0,r0
+       rev     r1,r1
+       rev     r2,r2
+       rev     r3,r3
+#endif
+       str     r0,[r11],#16
+       str     r1,[r11,#-12]
+       str     r2,[r11,#-8]
+       str     r3,[r11,#-4]
+#endif
+
+       teq     lr,#128
+       bne     .Lnot128
+       mov     r12,#10
+       str     r12,[r11,#240-16]
+       add     r6,r10,#256                     @ rcon
+       mov     lr,#255
+
+.L128_loop:
+       and     r5,lr,r3,lsr#24
+       and     r7,lr,r3,lsr#16
+       ldrb    r5,[r10,r5]
+       and     r8,lr,r3,lsr#8
+       ldrb    r7,[r10,r7]
+       and     r9,lr,r3
+       ldrb    r8,[r10,r8]
+       orr     r5,r5,r7,lsl#24
+       ldrb    r9,[r10,r9]
+       orr     r5,r5,r8,lsl#16
+       ldr     r4,[r6],#4                      @ rcon[i++]
+       orr     r5,r5,r9,lsl#8
+       eor     r5,r5,r4
+       eor     r0,r0,r5                        @ rk[4]=rk[0]^...
+       eor     r1,r1,r0                        @ rk[5]=rk[1]^rk[4]
+       str     r0,[r11],#16
+       eor     r2,r2,r1                        @ rk[6]=rk[2]^rk[5]
+       str     r1,[r11,#-12]
+       eor     r3,r3,r2                        @ rk[7]=rk[3]^rk[6]
+       str     r2,[r11,#-8]
+       subs    r12,r12,#1
+       str     r3,[r11,#-4]
+       bne     .L128_loop
+       sub     r2,r11,#176
+       b       .Ldone
+
+.Lnot128:
+#if __ARM_ARCH__<7
+       ldrb    r8,[r12,#19]
+       ldrb    r4,[r12,#18]
+       ldrb    r5,[r12,#17]
+       ldrb    r6,[r12,#16]
+       orr     r8,r8,r4,lsl#8
+       ldrb    r9,[r12,#23]
+       orr     r8,r8,r5,lsl#16
+       ldrb    r4,[r12,#22]
+       orr     r8,r8,r6,lsl#24
+       ldrb    r5,[r12,#21]
+       ldrb    r6,[r12,#20]
+       orr     r9,r9,r4,lsl#8
+       orr     r9,r9,r5,lsl#16
+       str     r8,[r11],#8
+       orr     r9,r9,r6,lsl#24
+       str     r9,[r11,#-4]
+#else
+       ldr     r8,[r12,#16]
+       ldr     r9,[r12,#20]
+#ifdef __ARMEL__
+       rev     r8,r8
+       rev     r9,r9
+#endif
+       str     r8,[r11],#8
+       str     r9,[r11,#-4]
+#endif
+
+       teq     lr,#192
+       bne     .Lnot192
+       mov     r12,#12
+       str     r12,[r11,#240-24]
+       add     r6,r10,#256                     @ rcon
+       mov     lr,#255
+       mov     r12,#8
+
+.L192_loop:
+       and     r5,lr,r9,lsr#24
+       and     r7,lr,r9,lsr#16
+       ldrb    r5,[r10,r5]
+       and     r8,lr,r9,lsr#8
+       ldrb    r7,[r10,r7]
+       and     r9,lr,r9
+       ldrb    r8,[r10,r8]
+       orr     r5,r5,r7,lsl#24
+       ldrb    r9,[r10,r9]
+       orr     r5,r5,r8,lsl#16
+       ldr     r4,[r6],#4                      @ rcon[i++]
+       orr     r5,r5,r9,lsl#8
+       eor     r9,r5,r4
+       eor     r0,r0,r9                        @ rk[6]=rk[0]^...
+       eor     r1,r1,r0                        @ rk[7]=rk[1]^rk[6]
+       str     r0,[r11],#24
+       eor     r2,r2,r1                        @ rk[8]=rk[2]^rk[7]
+       str     r1,[r11,#-20]
+       eor     r3,r3,r2                        @ rk[9]=rk[3]^rk[8]
+       str     r2,[r11,#-16]
+       subs    r12,r12,#1
+       str     r3,[r11,#-12]
+       subeq   r2,r11,#216
+       beq     .Ldone
+
+       ldr     r7,[r11,#-32]
+       ldr     r8,[r11,#-28]
+       eor     r7,r7,r3                        @ rk[10]=rk[4]^rk[9]
+       eor     r9,r8,r7                        @ rk[11]=rk[5]^rk[10]
+       str     r7,[r11,#-8]
+       str     r9,[r11,#-4]
+       b       .L192_loop
+
+.Lnot192:
+#if __ARM_ARCH__<7
+       ldrb    r8,[r12,#27]
+       ldrb    r4,[r12,#26]
+       ldrb    r5,[r12,#25]
+       ldrb    r6,[r12,#24]
+       orr     r8,r8,r4,lsl#8
+       ldrb    r9,[r12,#31]
+       orr     r8,r8,r5,lsl#16
+       ldrb    r4,[r12,#30]
+       orr     r8,r8,r6,lsl#24
+       ldrb    r5,[r12,#29]
+       ldrb    r6,[r12,#28]
+       orr     r9,r9,r4,lsl#8
+       orr     r9,r9,r5,lsl#16
+       str     r8,[r11],#8
+       orr     r9,r9,r6,lsl#24
+       str     r9,[r11,#-4]
+#else
+       ldr     r8,[r12,#24]
+       ldr     r9,[r12,#28]
+#ifdef __ARMEL__
+       rev     r8,r8
+       rev     r9,r9
+#endif
+       str     r8,[r11],#8
+       str     r9,[r11,#-4]
+#endif
+
+       mov     r12,#14
+       str     r12,[r11,#240-32]
+       add     r6,r10,#256                     @ rcon
+       mov     lr,#255
+       mov     r12,#7
+
+.L256_loop:
+       and     r5,lr,r9,lsr#24
+       and     r7,lr,r9,lsr#16
+       ldrb    r5,[r10,r5]
+       and     r8,lr,r9,lsr#8
+       ldrb    r7,[r10,r7]
+       and     r9,lr,r9
+       ldrb    r8,[r10,r8]
+       orr     r5,r5,r7,lsl#24
+       ldrb    r9,[r10,r9]
+       orr     r5,r5,r8,lsl#16
+       ldr     r4,[r6],#4                      @ rcon[i++]
+       orr     r5,r5,r9,lsl#8
+       eor     r9,r5,r4
+       eor     r0,r0,r9                        @ rk[8]=rk[0]^...
+       eor     r1,r1,r0                        @ rk[9]=rk[1]^rk[8]
+       str     r0,[r11],#32
+       eor     r2,r2,r1                        @ rk[10]=rk[2]^rk[9]
+       str     r1,[r11,#-28]
+       eor     r3,r3,r2                        @ rk[11]=rk[3]^rk[10]
+       str     r2,[r11,#-24]
+       subs    r12,r12,#1
+       str     r3,[r11,#-20]
+       subeq   r2,r11,#256
+       beq     .Ldone
+
+       and     r5,lr,r3
+       and     r7,lr,r3,lsr#8
+       ldrb    r5,[r10,r5]
+       and     r8,lr,r3,lsr#16
+       ldrb    r7,[r10,r7]
+       and     r9,lr,r3,lsr#24
+       ldrb    r8,[r10,r8]
+       orr     r5,r5,r7,lsl#8
+       ldrb    r9,[r10,r9]
+       orr     r5,r5,r8,lsl#16
+       ldr     r4,[r11,#-48]
+       orr     r5,r5,r9,lsl#24
+
+       ldr     r7,[r11,#-44]
+       ldr     r8,[r11,#-40]
+       eor     r4,r4,r5                        @ rk[12]=rk[4]^...
+       ldr     r9,[r11,#-36]
+       eor     r7,r7,r4                        @ rk[13]=rk[5]^rk[12]
+       str     r4,[r11,#-16]
+       eor     r8,r8,r7                        @ rk[14]=rk[6]^rk[13]
+       str     r7,[r11,#-12]
+       eor     r9,r9,r8                        @ rk[15]=rk[7]^rk[14]
+       str     r8,[r11,#-8]
+       str     r9,[r11,#-4]
+       b       .L256_loop
+
+.Ldone:        mov     r0,#0
+       ldmia   sp!,{r4-r12,lr}
+.Labrt:        tst     lr,#1
+       moveq   pc,lr                   @ be binary compatible with V4, yet
+       .word   0xe12fff1e                      @ interoperable with Thumb ISA:-)
+.size  private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
+
+.global private_AES_set_decrypt_key
+.type   private_AES_set_decrypt_key,%function
+.align 5
+private_AES_set_decrypt_key:
+       str     lr,[sp,#-4]!            @ push lr
+       bl      _armv4_AES_set_encrypt_key
+       teq     r0,#0
+       ldrne   lr,[sp],#4              @ pop lr
+       bne     .Labrt
+
+       stmdb   sp!,{r4-r12}
+
+       ldr     r12,[r2,#240]   @ AES_set_encrypt_key preserves r2,
+       mov     r11,r2                  @ which is AES_KEY *key
+       mov     r7,r2
+       add     r8,r2,r12,lsl#4
+
+.Linv: ldr     r0,[r7]
+       ldr     r1,[r7,#4]
+       ldr     r2,[r7,#8]
+       ldr     r3,[r7,#12]
+       ldr     r4,[r8]
+       ldr     r5,[r8,#4]
+       ldr     r6,[r8,#8]
+       ldr     r9,[r8,#12]
+       str     r0,[r8],#-16
+       str     r1,[r8,#16+4]
+       str     r2,[r8,#16+8]
+       str     r3,[r8,#16+12]
+       str     r4,[r7],#16
+       str     r5,[r7,#-12]
+       str     r6,[r7,#-8]
+       str     r9,[r7,#-4]
+       teq     r7,r8
+       bne     .Linv
+       ldr     r0,[r11,#16]!           @ prefetch tp1
+       mov     r7,#0x80
+       mov     r8,#0x1b
+       orr     r7,r7,#0x8000
+       orr     r8,r8,#0x1b00
+       orr     r7,r7,r7,lsl#16
+       orr     r8,r8,r8,lsl#16
+       sub     r12,r12,#1
+       mvn     r9,r7
+       mov     r12,r12,lsl#2   @ (rounds-1)*4
+
+.Lmix: and     r4,r0,r7
+       and     r1,r0,r9
+       sub     r4,r4,r4,lsr#7
+       and     r4,r4,r8
+       eor     r1,r4,r1,lsl#1  @ tp2
+
+       and     r4,r1,r7
+       and     r2,r1,r9
+       sub     r4,r4,r4,lsr#7
+       and     r4,r4,r8
+       eor     r2,r4,r2,lsl#1  @ tp4
+
+       and     r4,r2,r7
+       and     r3,r2,r9
+       sub     r4,r4,r4,lsr#7
+       and     r4,r4,r8
+       eor     r3,r4,r3,lsl#1  @ tp8
+
+       eor     r4,r1,r2
+       eor     r5,r0,r3                @ tp9
+       eor     r4,r4,r3                @ tpe
+       eor     r4,r4,r1,ror#24
+       eor     r4,r4,r5,ror#24 @ ^= ROTATE(tpb=tp9^tp2,8)
+       eor     r4,r4,r2,ror#16
+       eor     r4,r4,r5,ror#16 @ ^= ROTATE(tpd=tp9^tp4,16)
+       eor     r4,r4,r5,ror#8  @ ^= ROTATE(tp9,24)
+
+       ldr     r0,[r11,#4]             @ prefetch tp1
+       str     r4,[r11],#4
+       subs    r12,r12,#1
+       bne     .Lmix
+
+       mov     r0,#0
+#if __ARM_ARCH__>=5
+       ldmia   sp!,{r4-r12,pc}
+#else
+       ldmia   sp!,{r4-r12,lr}
+       tst     lr,#1
+       moveq   pc,lr                   @ be binary compatible with V4, yet
+       .word   0xe12fff1e                      @ interoperable with Thumb ISA:-)
+#endif
+.size  private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
+
+.type  AES_Td,%object
+.align 5
+AES_Td:
+.word  0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96
+.word  0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393
+.word  0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25
+.word  0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f
+.word  0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1
+.word  0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6
+.word  0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da
+.word  0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844
+.word  0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd
+.word  0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4
+.word  0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45
+.word  0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94
+.word  0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7
+.word  0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a
+.word  0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5
+.word  0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c
+.word  0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1
+.word  0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a
+.word  0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75
+.word  0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051
+.word  0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46
+.word  0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff
+.word  0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77
+.word  0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb
+.word  0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000
+.word  0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e
+.word  0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927
+.word  0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a
+.word  0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e
+.word  0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16
+.word  0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d
+.word  0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8
+.word  0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd
+.word  0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34
+.word  0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163
+.word  0xd731dcca, 0x42638510, 0x13972240, 0x84c61120
+.word  0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d
+.word  0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0
+.word  0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422
+.word  0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef
+.word  0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36
+.word  0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4
+.word  0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662
+.word  0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5
+.word  0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3
+.word  0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b
+.word  0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8
+.word  0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6
+.word  0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6
+.word  0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0
+.word  0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815
+.word  0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f
+.word  0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df
+.word  0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f
+.word  0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e
+.word  0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713
+.word  0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89
+.word  0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c
+.word  0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf
+.word  0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86
+.word  0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f
+.word  0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541
+.word  0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190
+.word  0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742
+@ Td4[256]
+.byte  0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
+.byte  0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
+.byte  0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
+.byte  0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
+.byte  0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
+.byte  0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
+.byte  0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
+.byte  0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
+.byte  0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
+.byte  0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
+.byte  0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
+.byte  0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
+.byte  0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
+.byte  0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
+.byte  0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
+.byte  0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
+.byte  0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
+.byte  0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
+.byte  0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
+.byte  0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
+.byte  0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
+.byte  0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
+.byte  0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
+.byte  0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
+.byte  0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
+.byte  0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
+.byte  0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
+.byte  0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
+.byte  0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
+.byte  0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
+.byte  0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
+.byte  0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
+.size  AES_Td,.-AES_Td
+
+@ void AES_decrypt(const unsigned char *in, unsigned char *out,
+@               const AES_KEY *key) {
+.global AES_decrypt
+.type   AES_decrypt,%function
+.align 5
+AES_decrypt:
+       sub     r3,pc,#8                @ AES_decrypt
+       stmdb   sp!,{r1,r4-r12,lr}
+       mov     r12,r0          @ inp
+       mov     r11,r2
+       sub     r10,r3,#AES_decrypt-AES_Td              @ Td
+#if __ARM_ARCH__<7
+       ldrb    r0,[r12,#3]     @ load input data in endian-neutral
+       ldrb    r4,[r12,#2]     @ manner...
+       ldrb    r5,[r12,#1]
+       ldrb    r6,[r12,#0]
+       orr     r0,r0,r4,lsl#8
+       ldrb    r1,[r12,#7]
+       orr     r0,r0,r5,lsl#16
+       ldrb    r4,[r12,#6]
+       orr     r0,r0,r6,lsl#24
+       ldrb    r5,[r12,#5]
+       ldrb    r6,[r12,#4]
+       orr     r1,r1,r4,lsl#8
+       ldrb    r2,[r12,#11]
+       orr     r1,r1,r5,lsl#16
+       ldrb    r4,[r12,#10]
+       orr     r1,r1,r6,lsl#24
+       ldrb    r5,[r12,#9]
+       ldrb    r6,[r12,#8]
+       orr     r2,r2,r4,lsl#8
+       ldrb    r3,[r12,#15]
+       orr     r2,r2,r5,lsl#16
+       ldrb    r4,[r12,#14]
+       orr     r2,r2,r6,lsl#24
+       ldrb    r5,[r12,#13]
+       ldrb    r6,[r12,#12]
+       orr     r3,r3,r4,lsl#8
+       orr     r3,r3,r5,lsl#16
+       orr     r3,r3,r6,lsl#24
+#else
+       ldr     r0,[r12,#0]
+       ldr     r1,[r12,#4]
+       ldr     r2,[r12,#8]
+       ldr     r3,[r12,#12]
+#ifdef __ARMEL__
+       rev     r0,r0
+       rev     r1,r1
+       rev     r2,r2
+       rev     r3,r3
+#endif
+#endif
+       bl      _armv4_AES_decrypt
+
+       ldr     r12,[sp],#4             @ pop out
+#if __ARM_ARCH__>=7
+#ifdef __ARMEL__
+       rev     r0,r0
+       rev     r1,r1
+       rev     r2,r2
+       rev     r3,r3
+#endif
+       str     r0,[r12,#0]
+       str     r1,[r12,#4]
+       str     r2,[r12,#8]
+       str     r3,[r12,#12]
+#else
+       mov     r4,r0,lsr#24            @ write output in endian-neutral
+       mov     r5,r0,lsr#16            @ manner...
+       mov     r6,r0,lsr#8
+       strb    r4,[r12,#0]
+       strb    r5,[r12,#1]
+       mov     r4,r1,lsr#24
+       strb    r6,[r12,#2]
+       mov     r5,r1,lsr#16
+       strb    r0,[r12,#3]
+       mov     r6,r1,lsr#8
+       strb    r4,[r12,#4]
+       strb    r5,[r12,#5]
+       mov     r4,r2,lsr#24
+       strb    r6,[r12,#6]
+       mov     r5,r2,lsr#16
+       strb    r1,[r12,#7]
+       mov     r6,r2,lsr#8
+       strb    r4,[r12,#8]
+       strb    r5,[r12,#9]
+       mov     r4,r3,lsr#24
+       strb    r6,[r12,#10]
+       mov     r5,r3,lsr#16
+       strb    r2,[r12,#11]
+       mov     r6,r3,lsr#8
+       strb    r4,[r12,#12]
+       strb    r5,[r12,#13]
+       strb    r6,[r12,#14]
+       strb    r3,[r12,#15]
+#endif
+#if __ARM_ARCH__>=5
+       ldmia   sp!,{r4-r12,pc}
+#else
+       ldmia   sp!,{r4-r12,lr}
+       tst     lr,#1
+       moveq   pc,lr                   @ be binary compatible with V4, yet
+       .word   0xe12fff1e                      @ interoperable with Thumb ISA:-)
+#endif
+.size  AES_decrypt,.-AES_decrypt
+
+.type   _armv4_AES_decrypt,%function
+.align 2
+_armv4_AES_decrypt:
+       str     lr,[sp,#-4]!            @ push lr
+       ldmia   r11!,{r4-r7}
+       eor     r0,r0,r4
+       ldr     r12,[r11,#240-16]
+       eor     r1,r1,r5
+       eor     r2,r2,r6
+       eor     r3,r3,r7
+       sub     r12,r12,#1
+       mov     lr,#255
+
+       and     r7,lr,r0,lsr#16
+       and     r8,lr,r0,lsr#8
+       and     r9,lr,r0
+       mov     r0,r0,lsr#24
+.Ldec_loop:
+       ldr     r4,[r10,r7,lsl#2]       @ Td1[s0>>16]
+       and     r7,lr,r1                @ i0
+       ldr     r5,[r10,r8,lsl#2]       @ Td2[s0>>8]
+       and     r8,lr,r1,lsr#16
+       ldr     r6,[r10,r9,lsl#2]       @ Td3[s0>>0]
+       and     r9,lr,r1,lsr#8
+       ldr     r0,[r10,r0,lsl#2]       @ Td0[s0>>24]
+       mov     r1,r1,lsr#24
+
+       ldr     r7,[r10,r7,lsl#2]       @ Td3[s1>>0]
+       ldr     r8,[r10,r8,lsl#2]       @ Td1[s1>>16]
+       ldr     r9,[r10,r9,lsl#2]       @ Td2[s1>>8]
+       eor     r0,r0,r7,ror#24
+       ldr     r1,[r10,r1,lsl#2]       @ Td0[s1>>24]
+       and     r7,lr,r2,lsr#8  @ i0
+       eor     r5,r8,r5,ror#8
+       and     r8,lr,r2                @ i1
+       eor     r6,r9,r6,ror#8
+       and     r9,lr,r2,lsr#16
+       ldr     r7,[r10,r7,lsl#2]       @ Td2[s2>>8]
+       eor     r1,r1,r4,ror#8
+       ldr     r8,[r10,r8,lsl#2]       @ Td3[s2>>0]
+       mov     r2,r2,lsr#24
+
+       ldr     r9,[r10,r9,lsl#2]       @ Td1[s2>>16]
+       eor     r0,r0,r7,ror#16
+       ldr     r2,[r10,r2,lsl#2]       @ Td0[s2>>24]
+       and     r7,lr,r3,lsr#16 @ i0
+       eor     r1,r1,r8,ror#24
+       and     r8,lr,r3,lsr#8  @ i1
+       eor     r6,r9,r6,ror#8
+       and     r9,lr,r3                @ i2
+       ldr     r7,[r10,r7,lsl#2]       @ Td1[s3>>16]
+       eor     r2,r2,r5,ror#8
+       ldr     r8,[r10,r8,lsl#2]       @ Td2[s3>>8]
+       mov     r3,r3,lsr#24
+
+       ldr     r9,[r10,r9,lsl#2]       @ Td3[s3>>0]
+       eor     r0,r0,r7,ror#8
+       ldr     r7,[r11],#16
+       eor     r1,r1,r8,ror#16
+       ldr     r3,[r10,r3,lsl#2]       @ Td0[s3>>24]
+       eor     r2,r2,r9,ror#24
+
+       ldr     r4,[r11,#-12]
+       eor     r0,r0,r7
+       ldr     r5,[r11,#-8]
+       eor     r3,r3,r6,ror#8
+       ldr     r6,[r11,#-4]
+       and     r7,lr,r0,lsr#16
+       eor     r1,r1,r4
+       and     r8,lr,r0,lsr#8
+       eor     r2,r2,r5
+       and     r9,lr,r0
+       eor     r3,r3,r6
+       mov     r0,r0,lsr#24
+
+       subs    r12,r12,#1
+       bne     .Ldec_loop
+
+       add     r10,r10,#1024
+
+       ldr     r5,[r10,#0]             @ prefetch Td4
+       ldr     r6,[r10,#32]
+       ldr     r4,[r10,#64]
+       ldr     r5,[r10,#96]
+       ldr     r6,[r10,#128]
+       ldr     r4,[r10,#160]
+       ldr     r5,[r10,#192]
+       ldr     r6,[r10,#224]
+
+       ldrb    r0,[r10,r0]             @ Td4[s0>>24]
+       ldrb    r4,[r10,r7]             @ Td4[s0>>16]
+       and     r7,lr,r1                @ i0
+       ldrb    r5,[r10,r8]             @ Td4[s0>>8]
+       and     r8,lr,r1,lsr#16
+       ldrb    r6,[r10,r9]             @ Td4[s0>>0]
+       and     r9,lr,r1,lsr#8
+
+       ldrb    r7,[r10,r7]             @ Td4[s1>>0]
+       ldrb    r1,[r10,r1,lsr#24]      @ Td4[s1>>24]
+       ldrb    r8,[r10,r8]             @ Td4[s1>>16]
+       eor     r0,r7,r0,lsl#24
+       ldrb    r9,[r10,r9]             @ Td4[s1>>8]
+       eor     r1,r4,r1,lsl#8
+       and     r7,lr,r2,lsr#8  @ i0
+       eor     r5,r5,r8,lsl#8
+       and     r8,lr,r2                @ i1
+       ldrb    r7,[r10,r7]             @ Td4[s2>>8]
+       eor     r6,r6,r9,lsl#8
+       ldrb    r8,[r10,r8]             @ Td4[s2>>0]
+       and     r9,lr,r2,lsr#16
+
+       ldrb    r2,[r10,r2,lsr#24]      @ Td4[s2>>24]
+       eor     r0,r0,r7,lsl#8
+       ldrb    r9,[r10,r9]             @ Td4[s2>>16]
+       eor     r1,r8,r1,lsl#16
+       and     r7,lr,r3,lsr#16 @ i0
+       eor     r2,r5,r2,lsl#16
+       and     r8,lr,r3,lsr#8  @ i1
+       ldrb    r7,[r10,r7]             @ Td4[s3>>16]
+       eor     r6,r6,r9,lsl#16
+       ldrb    r8,[r10,r8]             @ Td4[s3>>8]
+       and     r9,lr,r3                @ i2
+
+       ldrb    r9,[r10,r9]             @ Td4[s3>>0]
+       ldrb    r3,[r10,r3,lsr#24]      @ Td4[s3>>24]
+       eor     r0,r0,r7,lsl#16
+       ldr     r7,[r11,#0]
+       eor     r1,r1,r8,lsl#8
+       ldr     r4,[r11,#4]
+       eor     r2,r9,r2,lsl#8
+       ldr     r5,[r11,#8]
+       eor     r3,r6,r3,lsl#24
+       ldr     r6,[r11,#12]
+
+       eor     r0,r0,r7
+       eor     r1,r1,r4
+       eor     r2,r2,r5
+       eor     r3,r3,r6
+
+       sub     r10,r10,#1024
+       ldr     pc,[sp],#4              @ pop and return
+.size  _armv4_AES_decrypt,.-_armv4_AES_decrypt
+.asciz "AES for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
+.align 2
diff --git a/deps/openssl/asm/arm-elf-gas/bn/armv4-gf2m.S b/deps/openssl/asm/arm-elf-gas/bn/armv4-gf2m.S
new file mode 100644
index 0000000..552a883
--- /dev/null
+++ b/deps/openssl/asm/arm-elf-gas/bn/armv4-gf2m.S
@@ -0,0 +1,214 @@
+#include "arm_arch.h"
+
+.text
+.code  32
+
+#if __ARM_ARCH__>=7
+.fpu   neon
+
+.type  mul_1x1_neon,%function
+.align 5
+mul_1x1_neon:
+       vshl.u64        d2,d16,#8       @ q1-q3 are slided
+
+       vmull.p8        q0,d16,d17      @ a·bb
+       vshl.u64        d4,d16,#16
+       vmull.p8        q1,d2,d17       @ a<<8·bb
+       vshl.u64        d6,d16,#24
+       vmull.p8        q2,d4,d17       @ a<<16·bb
+       vshr.u64        d2,#8
+       vmull.p8        q3,d6,d17       @ a<<24·bb
+       vshl.u64        d3,#24
+       veor            d0,d2
+       vshr.u64        d4,#16
+       veor            d0,d3
+       vshl.u64        d5,#16
+       veor            d0,d4
+       vshr.u64        d6,#24
+       veor            d0,d5
+       vshl.u64        d7,#8
+       veor            d0,d6
+       veor            d0,d7
+       .word   0xe12fff1e
+.size  mul_1x1_neon,.-mul_1x1_neon
+#endif
+.type  mul_1x1_ialu,%function
+.align 5
+mul_1x1_ialu:
+       mov     r4,#0
+       bic     r5,r1,#3<<30            @ a1=a&0x3fffffff
+       str     r4,[sp,#0]              @ tab[0]=0
+       add     r6,r5,r5                @ a2=a1<<1
+       str     r5,[sp,#4]              @ tab[1]=a1
+       eor     r7,r5,r6                @ a1^a2
+       str     r6,[sp,#8]              @ tab[2]=a2
+       mov     r8,r5,lsl#2             @ a4=a1<<2
+       str     r7,[sp,#12]             @ tab[3]=a1^a2
+       eor     r9,r5,r8                @ a1^a4
+       str     r8,[sp,#16]             @ tab[4]=a4
+       eor     r4,r6,r8                @ a2^a4
+       str     r9,[sp,#20]             @ tab[5]=a1^a4
+       eor     r7,r7,r8                @ a1^a2^a4
+       str     r4,[sp,#24]             @ tab[6]=a2^a4
+       and     r8,r12,r0,lsl#2
+       str     r7,[sp,#28]             @ tab[7]=a1^a2^a4
+
+       and     r9,r12,r0,lsr#1
+       ldr     r5,[sp,r8]              @ tab[b       & 0x7]
+       and     r8,r12,r0,lsr#4
+       ldr     r7,[sp,r9]              @ tab[b >>  3 & 0x7]
+       and     r9,r12,r0,lsr#7
+       ldr     r6,[sp,r8]              @ tab[b >>  6 & 0x7]
+       eor     r5,r5,r7,lsl#3  @ stall
+       mov     r4,r7,lsr#29
+       ldr     r7,[sp,r9]              @ tab[b >>  9 & 0x7]
+
+       and     r8,r12,r0,lsr#10
+       eor     r5,r5,r6,lsl#6
+       eor     r4,r4,r6,lsr#26
+       ldr     r6,[sp,r8]              @ tab[b >> 12 & 0x7]
+
+       and     r9,r12,r0,lsr#13
+       eor     r5,r5,r7,lsl#9
+       eor     r4,r4,r7,lsr#23
+       ldr     r7,[sp,r9]              @ tab[b >> 15 & 0x7]
+
+       and     r8,r12,r0,lsr#16
+       eor     r5,r5,r6,lsl#12
+       eor     r4,r4,r6,lsr#20
+       ldr     r6,[sp,r8]              @ tab[b >> 18 & 0x7]
+
+       and     r9,r12,r0,lsr#19
+       eor     r5,r5,r7,lsl#15
+       eor     r4,r4,r7,lsr#17
+       ldr     r7,[sp,r9]              @ tab[b >> 21 & 0x7]
+
+       and     r8,r12,r0,lsr#22
+       eor     r5,r5,r6,lsl#18
+       eor     r4,r4,r6,lsr#14
+       ldr     r6,[sp,r8]              @ tab[b >> 24 & 0x7]
+
+       and     r9,r12,r0,lsr#25
+       eor     r5,r5,r7,lsl#21
+       eor     r4,r4,r7,lsr#11
+       ldr     r7,[sp,r9]              @ tab[b >> 27 & 0x7]
+
+       tst     r1,#1<<30
+       and     r8,r12,r0,lsr#28
+       eor     r5,r5,r6,lsl#24
+       eor     r4,r4,r6,lsr#8
+       ldr     r6,[sp,r8]              @ tab[b >> 30      ]
+
+       eorne   r5,r5,r0,lsl#30
+       eorne   r4,r4,r0,lsr#2
+       tst     r1,#1<<31
+       eor     r5,r5,r7,lsl#27
+       eor     r4,r4,r7,lsr#5
+       eorne   r5,r5,r0,lsl#31
+       eorne   r4,r4,r0,lsr#1
+       eor     r5,r5,r6,lsl#30
+       eor     r4,r4,r6,lsr#2
+
+       mov     pc,lr
+.size  mul_1x1_ialu,.-mul_1x1_ialu
+.global        bn_GF2m_mul_2x2
+.type  bn_GF2m_mul_2x2,%function
+.align 5
+bn_GF2m_mul_2x2:
+#if __ARM_ARCH__>=7
+       ldr     r12,.LOPENSSL_armcap
+.Lpic: ldr     r12,[pc,r12]
+       tst     r12,#1
+       beq     .Lialu
+
+       veor    d18,d18
+       vmov.32 d19,r3,r3               @ two copies of b1
+       vmov.32 d18[0],r1               @ a1
+
+       veor    d20,d20
+       vld1.32 d21[],[sp,:32]  @ two copies of b0
+       vmov.32 d20[0],r2               @ a0
+       mov     r12,lr
+
+       vmov    d16,d18
+       vmov    d17,d19
+       bl      mul_1x1_neon            @ a1·b1
+       vmov    d22,d0
+
+       vmov    d16,d20
+       vmov    d17,d21
+       bl      mul_1x1_neon            @ a0·b0
+       vmov    d23,d0
+
+       veor    d16,d20,d18
+       veor    d17,d21,d19
+       veor    d20,d23,d22
+       bl      mul_1x1_neon            @ (a0+a1)·(b0+b1)
+
+       veor    d0,d20                  @ (a0+a1)·(b0+b1)-a0·b0-a1·b1
+       vshl.u64 d1,d0,#32
+       vshr.u64 d0,d0,#32
+       veor    d23,d1
+       veor    d22,d0
+       vst1.32 {d23[0]},[r0,:32]!
+       vst1.32 {d23[1]},[r0,:32]!
+       vst1.32 {d22[0]},[r0,:32]!
+       vst1.32 {d22[1]},[r0,:32]
+       bx      r12
+.align 4
+.Lialu:
+#endif
+       stmdb   sp!,{r4-r10,lr}
+       mov     r10,r0                  @ reassign 1st argument
+       mov     r0,r3                   @ r0=b1
+       ldr     r3,[sp,#32]             @ load b0
+       mov     r12,#7<<2
+       sub     sp,sp,#32               @ allocate tab[8]
+
+       bl      mul_1x1_ialu            @ a1·b1
+       str     r5,[r10,#8]
+       str     r4,[r10,#12]
+
+       eor     r0,r0,r3                @ flip b0 and b1
+        eor    r1,r1,r2                @ flip a0 and a1
+       eor     r3,r3,r0
+        eor    r2,r2,r1
+       eor     r0,r0,r3
+        eor    r1,r1,r2
+       bl      mul_1x1_ialu            @ a0·b0
+       str     r5,[r10]
+       str     r4,[r10,#4]
+
+       eor     r1,r1,r2
+       eor     r0,r0,r3
+       bl      mul_1x1_ialu            @ (a1+a0)·(b1+b0)
+       ldmia   r10,{r6-r9}
+       eor     r5,r5,r4
+       eor     r4,r4,r7
+       eor     r5,r5,r6
+       eor     r4,r4,r8
+       eor     r5,r5,r9
+       eor     r4,r4,r9
+       str     r4,[r10,#8]
+       eor     r5,r5,r4
+       add     sp,sp,#32               @ destroy tab[8]
+       str     r5,[r10,#4]
+
+#if __ARM_ARCH__>=5
+       ldmia   sp!,{r4-r10,pc}
+#else
+       ldmia   sp!,{r4-r10,lr}
+       tst     lr,#1
+       moveq   pc,lr                   @ be binary compatible with V4, yet
+       .word   0xe12fff1e                      @ interoperable with Thumb ISA:-)
+#endif
+.size  bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
+#if __ARM_ARCH__>=7
+.align 5
+.LOPENSSL_armcap:
+.word  OPENSSL_armcap_P-(.Lpic+8)
+#endif
+.asciz "GF(2^m) Multiplication for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
+.align 5
+
+.comm  OPENSSL_armcap_P,4,4
diff --git a/deps/openssl/asm/arm-elf-gas/bn/armv4-mont.S b/deps/openssl/asm/arm-elf-gas/bn/armv4-mont.S
new file mode 100644
index 0000000..ba4cb87
--- /dev/null
+++ b/deps/openssl/asm/arm-elf-gas/bn/armv4-mont.S
@@ -0,0 +1,147 @@
+.text
+
+.global        bn_mul_mont
+.type  bn_mul_mont,%function
+
+.align 2
+bn_mul_mont:
+       stmdb   sp!,{r0,r2}             @ sp points at argument block
+       ldr     r0,[sp,#3*4]            @ load num
+       cmp     r0,#2
+       movlt   r0,#0
+       addlt   sp,sp,#2*4
+       blt     .Labrt
+
+       stmdb   sp!,{r4-r12,lr}         @ save 10 registers
+
+       mov     r0,r0,lsl#2             @ rescale r0 for byte count
+       sub     sp,sp,r0                @ alloca(4*num)
+       sub     sp,sp,#4                @ +extra dword
+       sub     r0,r0,#4                @ "num=num-1"
+       add     r4,r2,r0                @ &bp[num-1]
+
+       add     r0,sp,r0                @ r0 to point at &tp[num-1]
+       ldr     r8,[r0,#14*4]           @ &n0
+       ldr     r2,[r2]         @ bp[0]
+       ldr     r5,[r1],#4              @ ap[0],ap++
+       ldr     r6,[r3],#4              @ np[0],np++
+       ldr     r8,[r8]         @ *n0
+       str     r4,[r0,#15*4]           @ save &bp[num]
+
+       umull   r10,r11,r5,r2   @ ap[0]*bp[0]
+       str     r8,[r0,#14*4]           @ save n0 value
+       mul     r8,r10,r8               @ "tp[0]"*n0
+       mov     r12,#0
+       umlal   r10,r12,r6,r8   @ np[0]*n0+"t[0]"
+       mov     r4,sp
+
+.L1st:
+       ldr     r5,[r1],#4              @ ap[j],ap++
+       mov     r10,r11
+       ldr     r6,[r3],#4              @ np[j],np++
+       mov     r11,#0
+       umlal   r10,r11,r5,r2   @ ap[j]*bp[0]
+       mov     r14,#0
+       umlal   r12,r14,r6,r8   @ np[j]*n0
+       adds    r12,r12,r10
+       str     r12,[r4],#4             @ tp[j-1]=,tp++
+       adc     r12,r14,#0
+       cmp     r4,r0
+       bne     .L1st
+
+       adds    r12,r12,r11
+       ldr     r4,[r0,#13*4]           @ restore bp
+       mov     r14,#0
+       ldr     r8,[r0,#14*4]           @ restore n0
+       adc     r14,r14,#0
+       str     r12,[r0]                @ tp[num-1]=
+       str     r14,[r0,#4]             @ tp[num]=
+
+
+.Louter:
+       sub     r7,r0,sp                @ "original" r0-1 value
+       sub     r1,r1,r7                @ "rewind" ap to &ap[1]
+       ldr     r2,[r4,#4]!             @ *(++bp)
+       sub     r3,r3,r7                @ "rewind" np to &np[1]
+       ldr     r5,[r1,#-4]             @ ap[0]
+       ldr     r10,[sp]                @ tp[0]
+       ldr     r6,[r3,#-4]             @ np[0]
+       ldr     r7,[sp,#4]              @ tp[1]
+
+       mov     r11,#0
+       umlal   r10,r11,r5,r2   @ ap[0]*bp[i]+tp[0]
+       str     r4,[r0,#13*4]           @ save bp
+       mul     r8,r10,r8
+       mov     r12,#0
+       umlal   r10,r12,r6,r8   @ np[0]*n0+"tp[0]"
+       mov     r4,sp
+
+.Linner:
+       ldr     r5,[r1],#4              @ ap[j],ap++
+       adds    r10,r11,r7              @ +=tp[j]
+       ldr     r6,[r3],#4              @ np[j],np++
+       mov     r11,#0
+       umlal   r10,r11,r5,r2   @ ap[j]*bp[i]
+       mov     r14,#0
+       umlal   r12,r14,r6,r8   @ np[j]*n0
+       adc     r11,r11,#0
+       ldr     r7,[r4,#8]              @ tp[j+1]
+       adds    r12,r12,r10
+       str     r12,[r4],#4             @ tp[j-1]=,tp++
+       adc     r12,r14,#0
+       cmp     r4,r0
+       bne     .Linner
+
+       adds    r12,r12,r11
+       mov     r14,#0
+       ldr     r4,[r0,#13*4]           @ restore bp
+       adc     r14,r14,#0
+       ldr     r8,[r0,#14*4]           @ restore n0
+       adds    r12,r12,r7
+       ldr     r7,[r0,#15*4]           @ restore &bp[num]
+       adc     r14,r14,#0
+       str     r12,[r0]                @ tp[num-1]=
+       str     r14,[r0,#4]             @ tp[num]=
+
+       cmp     r4,r7
+       bne     .Louter
+
+
+       ldr     r2,[r0,#12*4]           @ pull rp
+       add     r0,r0,#4                @ r0 to point at &tp[num]
+       sub     r5,r0,sp                @ "original" num value
+       mov     r4,sp                   @ "rewind" r4
+       mov     r1,r4                   @ "borrow" r1
+       sub     r3,r3,r5                @ "rewind" r3 to &np[0]
+
+       subs    r7,r7,r7                @ "clear" carry flag
+.Lsub: ldr     r7,[r4],#4
+       ldr     r6,[r3],#4
+       sbcs    r7,r7,r6                @ tp[j]-np[j]
+       str     r7,[r2],#4              @ rp[j]=
+       teq     r4,r0           @ preserve carry
+       bne     .Lsub
+       sbcs    r14,r14,#0              @ upmost carry
+       mov     r4,sp                   @ "rewind" r4
+       sub     r2,r2,r5                @ "rewind" r2
+
+       and     r1,r4,r14
+       bic     r3,r2,r14
+       orr     r1,r1,r3                @ ap=borrow?tp:rp
+
+.Lcopy:        ldr     r7,[r1],#4              @ copy or in-place refresh
+       str     sp,[r4],#4              @ zap tp
+       str     r7,[r2],#4
+       cmp     r4,r0
+       bne     .Lcopy
+
+       add     sp,r0,#4                @ skip over tp[num+1]
+       ldmia   sp!,{r4-r12,lr}         @ restore registers
+       add     sp,sp,#2*4              @ skip over {r0,r2}
+       mov     r0,#1
+.Labrt:        tst     lr,#1
+       moveq   pc,lr                   @ be binary compatible with V4, yet
+       .word   0xe12fff1e                      @ interoperable with Thumb ISA:-)
+.size  bn_mul_mont,.-bn_mul_mont
+.asciz "Montgomery multiplication for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
+.align 2
diff --git a/deps/openssl/asm/arm-elf-gas/modes/ghash-armv4.S b/deps/openssl/asm/arm-elf-gas/modes/ghash-armv4.S
new file mode 100644
index 0000000..872aff1
--- /dev/null
+++ b/deps/openssl/asm/arm-elf-gas/modes/ghash-armv4.S
@@ -0,0 +1,415 @@
+#include "arm_arch.h"
+
+.text
+.code  32
+
+.type  rem_4bit,%object
+.align 5
+rem_4bit:
+.short 0x0000,0x1C20,0x3840,0x2460
+.short 0x7080,0x6CA0,0x48C0,0x54E0
+.short 0xE100,0xFD20,0xD940,0xC560
+.short 0x9180,0x8DA0,0xA9C0,0xB5E0
+.size  rem_4bit,.-rem_4bit
+
+.type  rem_4bit_get,%function
+rem_4bit_get:
+       sub     r2,pc,#8
+       sub     r2,r2,#32       @ &rem_4bit
+       b       .Lrem_4bit_got
+       nop
+.size  rem_4bit_get,.-rem_4bit_get
+
+.global        gcm_ghash_4bit
+.type  gcm_ghash_4bit,%function
+gcm_ghash_4bit:
+       sub     r12,pc,#8
+       add     r3,r2,r3                @ r3 to point at the end
+       stmdb   sp!,{r3-r11,lr}         @ save r3/end too
+       sub     r12,r12,#48             @ &rem_4bit
+
+       ldmia   r12,{r4-r11}            @ copy rem_4bit ...
+       stmdb   sp!,{r4-r11}            @ ... to stack
+
+       ldrb    r12,[r2,#15]
+       ldrb    r14,[r0,#15]
+.Louter:
+       eor     r12,r12,r14
+       and     r14,r12,#0xf0
+       and     r12,r12,#0x0f
+       mov     r3,#14
+
+       add     r7,r1,r12,lsl#4
+       ldmia   r7,{r4-r7}      @ load Htbl[nlo]
+       add     r11,r1,r14
+       ldrb    r12,[r2,#14]
+
+       and     r14,r4,#0xf             @ rem
+       ldmia   r11,{r8-r11}    @ load Htbl[nhi]
+       add     r14,r14,r14
+       eor     r4,r8,r4,lsr#4
+       ldrh    r8,[sp,r14]             @ rem_4bit[rem]
+       eor     r4,r4,r5,lsl#28
+       ldrb    r14,[r0,#14]
+       eor     r5,r9,r5,lsr#4
+       eor     r5,r5,r6,lsl#28
+       eor     r6,r10,r6,lsr#4
+       eor     r6,r6,r7,lsl#28
+       eor     r7,r11,r7,lsr#4
+       eor     r12,r12,r14
+       and     r14,r12,#0xf0
+       and     r12,r12,#0x0f
+       eor     r7,r7,r8,lsl#16
+
+.Linner:
+       add     r11,r1,r12,lsl#4
+       and     r12,r4,#0xf             @ rem
+       subs    r3,r3,#1
+       add     r12,r12,r12
+       ldmia   r11,{r8-r11}    @ load Htbl[nlo]
+       eor     r4,r8,r4,lsr#4
+       eor     r4,r4,r5,lsl#28
+       eor     r5,r9,r5,lsr#4
+       eor     r5,r5,r6,lsl#28
+       ldrh    r8,[sp,r12]             @ rem_4bit[rem]
+       eor     r6,r10,r6,lsr#4
+       ldrplb  r12,[r2,r3]
+       eor     r6,r6,r7,lsl#28
+       eor     r7,r11,r7,lsr#4
+
+       add     r11,r1,r14
+       and     r14,r4,#0xf             @ rem
+       eor     r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
+       add     r14,r14,r14
+       ldmia   r11,{r8-r11}    @ load Htbl[nhi]
+       eor     r4,r8,r4,lsr#4
+       ldrplb  r8,[r0,r3]
+       eor     r4,r4,r5,lsl#28
+       eor     r5,r9,r5,lsr#4
+       ldrh    r9,[sp,r14]
+       eor     r5,r5,r6,lsl#28
+       eor     r6,r10,r6,lsr#4
+       eor     r6,r6,r7,lsl#28
+       eorpl   r12,r12,r8
+       eor     r7,r11,r7,lsr#4
+       andpl   r14,r12,#0xf0
+       andpl   r12,r12,#0x0f
+       eor     r7,r7,r9,lsl#16 @ ^= rem_4bit[rem]
+       bpl     .Linner
+
+       ldr     r3,[sp,#32]             @ re-load r3/end
+       add     r2,r2,#16
+       mov     r14,r4
+#if __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r4,r4
+       str     r4,[r0,#12]
+#elif defined(__ARMEB__)
+       str     r4,[r0,#12]
+#else
+       mov     r9,r4,lsr#8
+       strb    r4,[r0,#12+3]
+       mov     r10,r4,lsr#16
+       strb    r9,[r0,#12+2]
+       mov     r11,r4,lsr#24
+       strb    r10,[r0,#12+1]
+       strb    r11,[r0,#12]
+#endif
+       cmp     r2,r3
+#if __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r5,r5
+       str     r5,[r0,#8]
+#elif defined(__ARMEB__)
+       str     r5,[r0,#8]
+#else
+       mov     r9,r5,lsr#8
+       strb    r5,[r0,#8+3]
+       mov     r10,r5,lsr#16
+       strb    r9,[r0,#8+2]
+       mov     r11,r5,lsr#24
+       strb    r10,[r0,#8+1]
+       strb    r11,[r0,#8]
+#endif
+       ldrneb  r12,[r2,#15]
+#if __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r6,r6
+       str     r6,[r0,#4]
+#elif defined(__ARMEB__)
+       str     r6,[r0,#4]
+#else
+       mov     r9,r6,lsr#8
+       strb    r6,[r0,#4+3]
+       mov     r10,r6,lsr#16
+       strb    r9,[r0,#4+2]
+       mov     r11,r6,lsr#24
+       strb    r10,[r0,#4+1]
+       strb    r11,[r0,#4]
+#endif
+
+
+#if __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r7,r7
+       str     r7,[r0,#0]
+#elif defined(__ARMEB__)
+       str     r7,[r0,#0]
+#else
+       mov     r9,r7,lsr#8
+       strb    r7,[r0,#0+3]
+       mov     r10,r7,lsr#16
+       strb    r9,[r0,#0+2]
+       mov     r11,r7,lsr#24
+       strb    r10,[r0,#0+1]
+       strb    r11,[r0,#0]
+#endif
+
+
+       bne     .Louter
+
+       add     sp,sp,#36
+#if __ARM_ARCH__>=5
+       ldmia   sp!,{r4-r11,pc}
+#else
+       ldmia   sp!,{r4-r11,lr}
+       tst     lr,#1
+       moveq   pc,lr                   @ be binary compatible with V4, yet
+       .word   0xe12fff1e                      @ interoperable with Thumb ISA:-)
+#endif
+.size  gcm_ghash_4bit,.-gcm_ghash_4bit
+
+.global        gcm_gmult_4bit
+.type  gcm_gmult_4bit,%function
+gcm_gmult_4bit:
+       stmdb   sp!,{r4-r11,lr}
+       ldrb    r12,[r0,#15]
+       b       rem_4bit_get
+.Lrem_4bit_got:
+       and     r14,r12,#0xf0
+       and     r12,r12,#0x0f
+       mov     r3,#14
+
+       add     r7,r1,r12,lsl#4
+       ldmia   r7,{r4-r7}      @ load Htbl[nlo]
+       ldrb    r12,[r0,#14]
+
+       add     r11,r1,r14
+       and     r14,r4,#0xf             @ rem
+       ldmia   r11,{r8-r11}    @ load Htbl[nhi]
+       add     r14,r14,r14
+       eor     r4,r8,r4,lsr#4
+       ldrh    r8,[r2,r14]     @ rem_4bit[rem]
+       eor     r4,r4,r5,lsl#28
+       eor     r5,r9,r5,lsr#4
+       eor     r5,r5,r6,lsl#28
+       eor     r6,r10,r6,lsr#4
+       eor     r6,r6,r7,lsl#28
+       eor     r7,r11,r7,lsr#4
+       and     r14,r12,#0xf0
+       eor     r7,r7,r8,lsl#16
+       and     r12,r12,#0x0f
+
+.Loop:
+       add     r11,r1,r12,lsl#4
+       and     r12,r4,#0xf             @ rem
+       subs    r3,r3,#1
+       add     r12,r12,r12
+       ldmia   r11,{r8-r11}    @ load Htbl[nlo]
+       eor     r4,r8,r4,lsr#4
+       eor     r4,r4,r5,lsl#28
+       eor     r5,r9,r5,lsr#4
+       eor     r5,r5,r6,lsl#28
+       ldrh    r8,[r2,r12]     @ rem_4bit[rem]
+       eor     r6,r10,r6,lsr#4
+       ldrplb  r12,[r0,r3]
+       eor     r6,r6,r7,lsl#28
+       eor     r7,r11,r7,lsr#4
+
+       add     r11,r1,r14
+       and     r14,r4,#0xf             @ rem
+       eor     r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
+       add     r14,r14,r14
+       ldmia   r11,{r8-r11}    @ load Htbl[nhi]
+       eor     r4,r8,r4,lsr#4
+       eor     r4,r4,r5,lsl#28
+       eor     r5,r9,r5,lsr#4
+       ldrh    r8,[r2,r14]     @ rem_4bit[rem]
+       eor     r5,r5,r6,lsl#28
+       eor     r6,r10,r6,lsr#4
+       eor     r6,r6,r7,lsl#28
+       eor     r7,r11,r7,lsr#4
+       andpl   r14,r12,#0xf0
+       andpl   r12,r12,#0x0f
+       eor     r7,r7,r8,lsl#16 @ ^= rem_4bit[rem]
+       bpl     .Loop
+#if __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r4,r4
+       str     r4,[r0,#12]
+#elif defined(__ARMEB__)
+       str     r4,[r0,#12]
+#else
+       mov     r9,r4,lsr#8
+       strb    r4,[r0,#12+3]
+       mov     r10,r4,lsr#16
+       strb    r9,[r0,#12+2]
+       mov     r11,r4,lsr#24
+       strb    r10,[r0,#12+1]
+       strb    r11,[r0,#12]
+#endif
+
+
+#if __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r5,r5
+       str     r5,[r0,#8]
+#elif defined(__ARMEB__)
+       str     r5,[r0,#8]
+#else
+       mov     r9,r5,lsr#8
+       strb    r5,[r0,#8+3]
+       mov     r10,r5,lsr#16
+       strb    r9,[r0,#8+2]
+       mov     r11,r5,lsr#24
+       strb    r10,[r0,#8+1]
+       strb    r11,[r0,#8]
+#endif
+
+
+#if __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r6,r6
+       str     r6,[r0,#4]
+#elif defined(__ARMEB__)
+       str     r6,[r0,#4]
+#else
+       mov     r9,r6,lsr#8
+       strb    r6,[r0,#4+3]
+       mov     r10,r6,lsr#16
+       strb    r9,[r0,#4+2]
+       mov     r11,r6,lsr#24
+       strb    r10,[r0,#4+1]
+       strb    r11,[r0,#4]
+#endif
+
+
+#if __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r7,r7
+       str     r7,[r0,#0]
+#elif defined(__ARMEB__)
+       str     r7,[r0,#0]
+#else
+       mov     r9,r7,lsr#8
+       strb    r7,[r0,#0+3]
+       mov     r10,r7,lsr#16
+       strb    r9,[r0,#0+2]
+       mov     r11,r7,lsr#24
+       strb    r10,[r0,#0+1]
+       strb    r11,[r0,#0]
+#endif
+
+
+#if __ARM_ARCH__>=5
+       ldmia   sp!,{r4-r11,pc}
+#else
+       ldmia   sp!,{r4-r11,lr}
+       tst     lr,#1
+       moveq   pc,lr                   @ be binary compatible with V4, yet
+       .word   0xe12fff1e                      @ interoperable with Thumb ISA:-)
+#endif
+.size  gcm_gmult_4bit,.-gcm_gmult_4bit
+#if __ARM_ARCH__>=7
+.fpu   neon
+
+.global        gcm_gmult_neon
+.type  gcm_gmult_neon,%function
+.align 4
+gcm_gmult_neon:
+       sub             r1,#16          @ point at H in GCM128_CTX
+       vld1.64         d29,[r0,:64]!@ load Xi
+       vmov.i32        d5,#0xe1                @ our irreducible polynomial
+       vld1.64         d28,[r0,:64]!
+       vshr.u64        d5,#32
+       vldmia          r1,{d0-d1}      @ load H
+       veor            q12,q12
+#ifdef __ARMEL__
+       vrev64.8        q14,q14
+#endif
+       veor            q13,q13
+       veor            q11,q11
+       mov             r1,#16
+       veor            q10,q10
+       mov             r3,#16
+       veor            d2,d2
+       vdup.8          d4,d28[0]       @ broadcast lowest byte
+       b               .Linner_neon
+.size  gcm_gmult_neon,.-gcm_gmult_neon
+
+.global        gcm_ghash_neon
+.type  gcm_ghash_neon,%function
+.align 4
+gcm_ghash_neon:
+       vld1.64         d21,[r0,:64]!   @ load Xi
+       vmov.i32        d5,#0xe1                @ our irreducible polynomial
+       vld1.64         d20,[r0,:64]!
+       vshr.u64        d5,#32
+       vldmia          r0,{d0-d1}              @ load H
+       veor            q12,q12
+       nop
+#ifdef __ARMEL__
+       vrev64.8        q10,q10
+#endif
+.Louter_neon:
+       vld1.64         d29,[r2]!       @ load inp
+       veor            q13,q13
+       vld1.64         d28,[r2]!
+       veor            q11,q11
+       mov             r1,#16
+#ifdef __ARMEL__
+       vrev64.8        q14,q14
+#endif
+       veor            d2,d2
+       veor            q14,q10                 @ inp^=Xi
+       veor            q10,q10
+       vdup.8          d4,d28[0]       @ broadcast lowest byte
+.Linner_neon:
+       subs            r1,r1,#1
+       vmull.p8        q9,d1,d4                @ H.lo·Xi[i]
+       vmull.p8        q8,d0,d4                @ H.hi·Xi[i]
+       vext.8          q14,q12,#1              @ IN>>=8
+
+       veor            q10,q13         @ modulo-scheduled part
+       vshl.i64        d22,#48
+       vdup.8          d4,d28[0]       @ broadcast lowest byte
+       veor            d3,d18,d20
+
+       veor            d21,d22
+       vuzp.8          q9,q8
+       vsli.8          d2,d3,#1                @ compose the "carry" byte
+       vext.8          q10,q12,#1              @ Z>>=8
+
+       vmull.p8        q11,d2,d5               @ "carry"·0xe1
+       vshr.u8         d2,d3,#7                @ save Z's bottom bit
+       vext.8          q13,q9,q12,#1   @ Qlo>>=8
+       veor            q10,q8
+       bne             .Linner_neon
+
+       veor            q10,q13         @ modulo-scheduled artefact
+       vshl.i64        d22,#48
+       veor            d21,d22
+
+       @ finalization, normalize Z:Zo
+       vand            d2,d5           @ suffices to mask the bit
+       vshr.u64        d3,d20,#63
+       vshl.i64        q10,#1
+       subs            r3,#16
+       vorr            q10,q1          @ Z=Z:Zo<<1
+       bne             .Louter_neon
+
+#ifdef __ARMEL__
+       vrev64.8        q10,q10
+#endif
+       sub             r0,#16
+
+       vst1.64         d21,[r0,:64]!   @ write out Xi
+       vst1.64         d20,[r0,:64]
+
+       .word   0xe12fff1e
+.size  gcm_ghash_neon,.-gcm_ghash_neon
+#endif
+.asciz  "GHASH for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
+.align  2
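
Note on the file above: gcm_gmult_4bit multiplies Xi by the hash key H in GF(2^128), gcm_ghash_4bit folds each 16-byte input block in as Xi = (Xi xor inp) * H, and the NEON variants compute the same primitive with vmull.p8 polynomial multiplies. As a rough reference for what the 4-bit-table assembly (Htbl[nlo]/Htbl[nhi] lookups plus the rem_4bit reduction constants copied onto the stack) accelerates, here is a minimal bit-at-a-time C sketch; gf128_mul and ghash_block are illustrative names, not OpenSSL API.

    #include <stdint.h>
    #include <string.h>

    /* Multiply X by H in GF(2^128) with GCM's bit ordering (right-shift variant).
     * R = 0xe1 || 0^120 encodes the polynomial x^128 + x^7 + x^2 + x + 1,
     * the same 0xe1 loaded as the "irreducible polynomial" in the NEON code. */
    static void gf128_mul(uint8_t Z[16], const uint8_t X[16], const uint8_t H[16])
    {
        uint8_t V[16], acc[16] = {0};
        memcpy(V, H, 16);
        for (int i = 0; i < 128; i++) {
            if (X[i >> 3] & (0x80 >> (i & 7))) {   /* bit i of X, MSB of byte 0 first */
                for (int j = 0; j < 16; j++) acc[j] ^= V[j];
            }
            int lsb = V[15] & 1;                   /* V = V >> 1, reduced mod the polynomial */
            for (int j = 15; j > 0; j--) V[j] = (uint8_t)((V[j] >> 1) | (V[j - 1] << 7));
            V[0] >>= 1;
            if (lsb) V[0] ^= 0xe1;
        }
        memcpy(Z, acc, 16);
    }

    /* One GHASH step, as gcm_ghash_* performs per 16-byte block: Xi = (Xi ^ inp) * H. */
    static void ghash_block(uint8_t Xi[16], const uint8_t H[16], const uint8_t inp[16])
    {
        for (int j = 0; j < 16; j++) Xi[j] ^= inp[j];
        gf128_mul(Xi, Xi, H);
    }

The assembly replaces this per-bit loop with nibble-sized table lookups (two per input byte, high and low 4 bits), using rem_4bit to fold the bits shifted out of the bottom back into the top word.
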
diff --git a/deps/openssl/asm/arm-elf-gas/sha/sha1-armv4-large.S b/deps/openssl/asm/arm-elf-gas/sha/sha1-armv4-large.S
new file mode 100644 (file)
index 0000000..639ae78
--- /dev/null
@@ -0,0 +1,452 @@
+#include "arm_arch.h"
+
+.text
+
+.global        sha1_block_data_order
+.type  sha1_block_data_order,%function
+
+.align 2
+sha1_block_data_order:
+       stmdb   sp!,{r4-r12,lr}
+       add     r2,r1,r2,lsl#6  @ r2 to point at the end of r1
+       ldmia   r0,{r3,r4,r5,r6,r7}
+.Lloop:
+       ldr     r8,.LK_00_19
+       mov     r14,sp
+       sub     sp,sp,#15*4
+       mov     r5,r5,ror#30
+       mov     r6,r6,ror#30
+       mov     r7,r7,ror#30            @ [6]
+.L_00_15:
+#if __ARM_ARCH__<7
+       ldrb    r10,[r1,#2]
+       ldrb    r9,[r1,#3]
+       ldrb    r11,[r1,#1]
+       add     r7,r8,r7,ror#2                  @ E+=K_00_19
+       ldrb    r12,[r1],#4
+       orr     r9,r9,r10,lsl#8
+       eor     r10,r5,r6                       @ F_xx_xx
+       orr     r9,r9,r11,lsl#16
+       add     r7,r7,r3,ror#27                 @ E+=ROR(A,27)
+       orr     r9,r9,r12,lsl#24
+#else
+       ldr     r9,[r1],#4                      @ handles unaligned
+       add     r7,r8,r7,ror#2                  @ E+=K_00_19
+       eor     r10,r5,r6                       @ F_xx_xx
+       add     r7,r7,r3,ror#27                 @ E+=ROR(A,27)
+#ifdef __ARMEL__
+       rev     r9,r9                           @ byte swap
+#endif
+#endif
+       and     r10,r4,r10,ror#2
+       add     r7,r7,r9                        @ E+=X[i]
+       eor     r10,r10,r6,ror#2                @ F_00_19(B,C,D)
+       str     r9,[r14,#-4]!
+       add     r7,r7,r10                       @ E+=F_00_19(B,C,D)
+#if __ARM_ARCH__<7
+       ldrb    r10,[r1,#2]
+       ldrb    r9,[r1,#3]
+       ldrb    r11,[r1,#1]
+       add     r6,r8,r6,ror#2                  @ E+=K_00_19
+       ldrb    r12,[r1],#4
+       orr     r9,r9,r10,lsl#8
+       eor     r10,r4,r5                       @ F_xx_xx
+       orr     r9,r9,r11,lsl#16
+       add     r6,r6,r7,ror#27                 @ E+=ROR(A,27)
+       orr     r9,r9,r12,lsl#24
+#else
+       ldr     r9,[r1],#4                      @ handles unaligned
+       add     r6,r8,r6,ror#2                  @ E+=K_00_19
+       eor     r10,r4,r5                       @ F_xx_xx
+       add     r6,r6,r7,ror#27                 @ E+=ROR(A,27)
+#ifdef __ARMEL__
+       rev     r9,r9                           @ byte swap
+#endif
+#endif
+       and     r10,r3,r10,ror#2
+       add     r6,r6,r9                        @ E+=X[i]
+       eor     r10,r10,r5,ror#2                @ F_00_19(B,C,D)
+       str     r9,[r14,#-4]!
+       add     r6,r6,r10                       @ E+=F_00_19(B,C,D)
+#if __ARM_ARCH__<7
+       ldrb    r10,[r1,#2]
+       ldrb    r9,[r1,#3]
+       ldrb    r11,[r1,#1]
+       add     r5,r8,r5,ror#2                  @ E+=K_00_19
+       ldrb    r12,[r1],#4
+       orr     r9,r9,r10,lsl#8
+       eor     r10,r3,r4                       @ F_xx_xx
+       orr     r9,r9,r11,lsl#16
+       add     r5,r5,r6,ror#27                 @ E+=ROR(A,27)
+       orr     r9,r9,r12,lsl#24
+#else
+       ldr     r9,[r1],#4                      @ handles unaligned
+       add     r5,r8,r5,ror#2                  @ E+=K_00_19
+       eor     r10,r3,r4                       @ F_xx_xx
+       add     r5,r5,r6,ror#27                 @ E+=ROR(A,27)
+#ifdef __ARMEL__
+       rev     r9,r9                           @ byte swap
+#endif
+#endif
+       and     r10,r7,r10,ror#2
+       add     r5,r5,r9                        @ E+=X[i]
+       eor     r10,r10,r4,ror#2                @ F_00_19(B,C,D)
+       str     r9,[r14,#-4]!
+       add     r5,r5,r10                       @ E+=F_00_19(B,C,D)
+#if __ARM_ARCH__<7
+       ldrb    r10,[r1,#2]
+       ldrb    r9,[r1,#3]
+       ldrb    r11,[r1,#1]
+       add     r4,r8,r4,ror#2                  @ E+=K_00_19
+       ldrb    r12,[r1],#4
+       orr     r9,r9,r10,lsl#8
+       eor     r10,r7,r3                       @ F_xx_xx
+       orr     r9,r9,r11,lsl#16
+       add     r4,r4,r5,ror#27                 @ E+=ROR(A,27)
+       orr     r9,r9,r12,lsl#24
+#else
+       ldr     r9,[r1],#4                      @ handles unaligned
+       add     r4,r8,r4,ror#2                  @ E+=K_00_19
+       eor     r10,r7,r3                       @ F_xx_xx
+       add     r4,r4,r5,ror#27                 @ E+=ROR(A,27)
+#ifdef __ARMEL__
+       rev     r9,r9                           @ byte swap
+#endif
+#endif
+       and     r10,r6,r10,ror#2
+       add     r4,r4,r9                        @ E+=X[i]
+       eor     r10,r10,r3,ror#2                @ F_00_19(B,C,D)
+       str     r9,[r14,#-4]!
+       add     r4,r4,r10                       @ E+=F_00_19(B,C,D)
+#if __ARM_ARCH__<7
+       ldrb    r10,[r1,#2]
+       ldrb    r9,[r1,#3]
+       ldrb    r11,[r1,#1]
+       add     r3,r8,r3,ror#2                  @ E+=K_00_19
+       ldrb    r12,[r1],#4
+       orr     r9,r9,r10,lsl#8
+       eor     r10,r6,r7                       @ F_xx_xx
+       orr     r9,r9,r11,lsl#16
+       add     r3,r3,r4,ror#27                 @ E+=ROR(A,27)
+       orr     r9,r9,r12,lsl#24
+#else
+       ldr     r9,[r1],#4                      @ handles unaligned
+       add     r3,r8,r3,ror#2                  @ E+=K_00_19
+       eor     r10,r6,r7                       @ F_xx_xx
+       add     r3,r3,r4,ror#27                 @ E+=ROR(A,27)
+#ifdef __ARMEL__
+       rev     r9,r9                           @ byte swap
+#endif
+#endif
+       and     r10,r5,r10,ror#2
+       add     r3,r3,r9                        @ E+=X[i]
+       eor     r10,r10,r7,ror#2                @ F_00_19(B,C,D)
+       str     r9,[r14,#-4]!
+       add     r3,r3,r10                       @ E+=F_00_19(B,C,D)
+       teq     r14,sp
+       bne     .L_00_15                @ [((11+4)*5+2)*3]
+       sub     sp,sp,#25*4
+#if __ARM_ARCH__<7
+       ldrb    r10,[r1,#2]
+       ldrb    r9,[r1,#3]
+       ldrb    r11,[r1,#1]
+       add     r7,r8,r7,ror#2                  @ E+=K_00_19
+       ldrb    r12,[r1],#4
+       orr     r9,r9,r10,lsl#8
+       eor     r10,r5,r6                       @ F_xx_xx
+       orr     r9,r9,r11,lsl#16
+       add     r7,r7,r3,ror#27                 @ E+=ROR(A,27)
+       orr     r9,r9,r12,lsl#24
+#else
+       ldr     r9,[r1],#4                      @ handles unaligned
+       add     r7,r8,r7,ror#2                  @ E+=K_00_19
+       eor     r10,r5,r6                       @ F_xx_xx
+       add     r7,r7,r3,ror#27                 @ E+=ROR(A,27)
+#ifdef __ARMEL__
+       rev     r9,r9                           @ byte swap
+#endif
+#endif
+       and     r10,r4,r10,ror#2
+       add     r7,r7,r9                        @ E+=X[i]
+       eor     r10,r10,r6,ror#2                @ F_00_19(B,C,D)
+       str     r9,[r14,#-4]!
+       add     r7,r7,r10                       @ E+=F_00_19(B,C,D)
+       ldr     r9,[r14,#15*4]
+       ldr     r10,[r14,#13*4]
+       ldr     r11,[r14,#7*4]
+       add     r6,r8,r6,ror#2                  @ E+=K_xx_xx
+       ldr     r12,[r14,#2*4]
+       eor     r9,r9,r10
+       eor     r11,r11,r12                     @ 1 cycle stall
+       eor     r10,r4,r5                       @ F_xx_xx
+       mov     r9,r9,ror#31
+       add     r6,r6,r7,ror#27                 @ E+=ROR(A,27)
+       eor     r9,r9,r11,ror#31
+       str     r9,[r14,#-4]!
+       and r10,r3,r10,ror#2                                    @ F_xx_xx
+                                               @ F_xx_xx
+       add     r6,r6,r9                        @ E+=X[i]
+       eor     r10,r10,r5,ror#2                @ F_00_19(B,C,D)
+       add     r6,r6,r10                       @ E+=F_00_19(B,C,D)
+       ldr     r9,[r14,#15*4]
+       ldr     r10,[r14,#13*4]
+       ldr     r11,[r14,#7*4]
+       add     r5,r8,r5,ror#2                  @ E+=K_xx_xx
+       ldr     r12,[r14,#2*4]
+       eor     r9,r9,r10
+       eor     r11,r11,r12                     @ 1 cycle stall
+       eor     r10,r3,r4                       @ F_xx_xx
+       mov     r9,r9,ror#31
+       add     r5,r5,r6,ror#27                 @ E+=ROR(A,27)
+       eor     r9,r9,r11,ror#31
+       str     r9,[r14,#-4]!
+       and r10,r7,r10,ror#2                                    @ F_xx_xx
+                                               @ F_xx_xx
+       add     r5,r5,r9                        @ E+=X[i]
+       eor     r10,r10,r4,ror#2                @ F_00_19(B,C,D)
+       add     r5,r5,r10                       @ E+=F_00_19(B,C,D)
+       ldr     r9,[r14,#15*4]
+       ldr     r10,[r14,#13*4]
+       ldr     r11,[r14,#7*4]
+       add     r4,r8,r4,ror#2                  @ E+=K_xx_xx
+       ldr     r12,[r14,#2*4]
+       eor     r9,r9,r10
+       eor     r11,r11,r12                     @ 1 cycle stall
+       eor     r10,r7,r3                       @ F_xx_xx
+       mov     r9,r9,ror#31
+       add     r4,r4,r5,ror#27                 @ E+=ROR(A,27)
+       eor     r9,r9,r11,ror#31
+       str     r9,[r14,#-4]!
+       and r10,r6,r10,ror#2                                    @ F_xx_xx
+                                               @ F_xx_xx
+       add     r4,r4,r9                        @ E+=X[i]
+       eor     r10,r10,r3,ror#2                @ F_00_19(B,C,D)
+       add     r4,r4,r10                       @ E+=F_00_19(B,C,D)
+       ldr     r9,[r14,#15*4]
+       ldr     r10,[r14,#13*4]
+       ldr     r11,[r14,#7*4]
+       add     r3,r8,r3,ror#2                  @ E+=K_xx_xx
+       ldr     r12,[r14,#2*4]
+       eor     r9,r9,r10
+       eor     r11,r11,r12                     @ 1 cycle stall
+       eor     r10,r6,r7                       @ F_xx_xx
+       mov     r9,r9,ror#31
+       add     r3,r3,r4,ror#27                 @ E+=ROR(A,27)
+       eor     r9,r9,r11,ror#31
+       str     r9,[r14,#-4]!
+       and r10,r5,r10,ror#2                                    @ F_xx_xx
+                                               @ F_xx_xx
+       add     r3,r3,r9                        @ E+=X[i]
+       eor     r10,r10,r7,ror#2                @ F_00_19(B,C,D)
+       add     r3,r3,r10                       @ E+=F_00_19(B,C,D)
+
+       ldr     r8,.LK_20_39            @ [+15+16*4]
+       cmn     sp,#0                   @ [+3], clear carry to denote 20_39
+.L_20_39_or_60_79:
+       ldr     r9,[r14,#15*4]
+       ldr     r10,[r14,#13*4]
+       ldr     r11,[r14,#7*4]
+       add     r7,r8,r7,ror#2                  @ E+=K_xx_xx
+       ldr     r12,[r14,#2*4]
+       eor     r9,r9,r10
+       eor     r11,r11,r12                     @ 1 cycle stall
+       eor     r10,r5,r6                       @ F_xx_xx
+       mov     r9,r9,ror#31
+       add     r7,r7,r3,ror#27                 @ E+=ROR(A,27)
+       eor     r9,r9,r11,ror#31
+       str     r9,[r14,#-4]!
+       eor r10,r4,r10,ror#2                                    @ F_xx_xx
+                                               @ F_xx_xx
+       add     r7,r7,r9                        @ E+=X[i]
+       add     r7,r7,r10                       @ E+=F_20_39(B,C,D)
+       ldr     r9,[r14,#15*4]
+       ldr     r10,[r14,#13*4]
+       ldr     r11,[r14,#7*4]
+       add     r6,r8,r6,ror#2                  @ E+=K_xx_xx
+       ldr     r12,[r14,#2*4]
+       eor     r9,r9,r10
+       eor     r11,r11,r12                     @ 1 cycle stall
+       eor     r10,r4,r5                       @ F_xx_xx
+       mov     r9,r9,ror#31
+       add     r6,r6,r7,ror#27                 @ E+=ROR(A,27)
+       eor     r9,r9,r11,ror#31
+       str     r9,[r14,#-4]!
+       eor r10,r3,r10,ror#2                                    @ F_xx_xx
+                                               @ F_xx_xx
+       add     r6,r6,r9                        @ E+=X[i]
+       add     r6,r6,r10                       @ E+=F_20_39(B,C,D)
+       ldr     r9,[r14,#15*4]
+       ldr     r10,[r14,#13*4]
+       ldr     r11,[r14,#7*4]
+       add     r5,r8,r5,ror#2                  @ E+=K_xx_xx
+       ldr     r12,[r14,#2*4]
+       eor     r9,r9,r10
+       eor     r11,r11,r12                     @ 1 cycle stall
+       eor     r10,r3,r4                       @ F_xx_xx
+       mov     r9,r9,ror#31
+       add     r5,r5,r6,ror#27                 @ E+=ROR(A,27)
+       eor     r9,r9,r11,ror#31
+       str     r9,[r14,#-4]!
+       eor r10,r7,r10,ror#2                                    @ F_xx_xx
+                                               @ F_xx_xx
+       add     r5,r5,r9                        @ E+=X[i]
+       add     r5,r5,r10                       @ E+=F_20_39(B,C,D)
+       ldr     r9,[r14,#15*4]
+       ldr     r10,[r14,#13*4]
+       ldr     r11,[r14,#7*4]
+       add     r4,r8,r4,ror#2                  @ E+=K_xx_xx
+       ldr     r12,[r14,#2*4]
+       eor     r9,r9,r10
+       eor     r11,r11,r12                     @ 1 cycle stall
+       eor     r10,r7,r3                       @ F_xx_xx
+       mov     r9,r9,ror#31
+       add     r4,r4,r5,ror#27                 @ E+=ROR(A,27)
+       eor     r9,r9,r11,ror#31
+       str     r9,[r14,#-4]!
+       eor r10,r6,r10,ror#2                                    @ F_xx_xx
+                                               @ F_xx_xx
+       add     r4,r4,r9                        @ E+=X[i]
+       add     r4,r4,r10                       @ E+=F_20_39(B,C,D)
+       ldr     r9,[r14,#15*4]
+       ldr     r10,[r14,#13*4]
+       ldr     r11,[r14,#7*4]
+       add     r3,r8,r3,ror#2                  @ E+=K_xx_xx
+       ldr     r12,[r14,#2*4]
+       eor     r9,r9,r10
+       eor     r11,r11,r12                     @ 1 cycle stall
+       eor     r10,r6,r7                       @ F_xx_xx
+       mov     r9,r9,ror#31
+       add     r3,r3,r4,ror#27                 @ E+=ROR(A,27)
+       eor     r9,r9,r11,ror#31
+       str     r9,[r14,#-4]!
+       eor r10,r5,r10,ror#2                                    @ F_xx_xx
+                                               @ F_xx_xx
+       add     r3,r3,r9                        @ E+=X[i]
+       add     r3,r3,r10                       @ E+=F_20_39(B,C,D)
+       teq     r14,sp                  @ preserve carry
+       bne     .L_20_39_or_60_79       @ [+((12+3)*5+2)*4]
+       bcs     .L_done                 @ [+((12+3)*5+2)*4], spare 300 bytes
+
+       ldr     r8,.LK_40_59
+       sub     sp,sp,#20*4             @ [+2]
+.L_40_59:
+       ldr     r9,[r14,#15*4]
+       ldr     r10,[r14,#13*4]
+       ldr     r11,[r14,#7*4]
+       add     r7,r8,r7,ror#2                  @ E+=K_xx_xx
+       ldr     r12,[r14,#2*4]
+       eor     r9,r9,r10
+       eor     r11,r11,r12                     @ 1 cycle stall
+       eor     r10,r5,r6                       @ F_xx_xx
+       mov     r9,r9,ror#31
+       add     r7,r7,r3,ror#27                 @ E+=ROR(A,27)
+       eor     r9,r9,r11,ror#31
+       str     r9,[r14,#-4]!
+       and r10,r4,r10,ror#2                                    @ F_xx_xx
+       and r11,r5,r6                                   @ F_xx_xx
+       add     r7,r7,r9                        @ E+=X[i]
+       add     r7,r7,r10                       @ E+=F_40_59(B,C,D)
+       add     r7,r7,r11,ror#2
+       ldr     r9,[r14,#15*4]
+       ldr     r10,[r14,#13*4]
+       ldr     r11,[r14,#7*4]
+       add     r6,r8,r6,ror#2                  @ E+=K_xx_xx
+       ldr     r12,[r14,#2*4]
+       eor     r9,r9,r10
+       eor     r11,r11,r12                     @ 1 cycle stall
+       eor     r10,r4,r5                       @ F_xx_xx
+       mov     r9,r9,ror#31
+       add     r6,r6,r7,ror#27                 @ E+=ROR(A,27)
+       eor     r9,r9,r11,ror#31
+       str     r9,[r14,#-4]!
+       and r10,r3,r10,ror#2                                    @ F_xx_xx
+       and r11,r4,r5                                   @ F_xx_xx
+       add     r6,r6,r9                        @ E+=X[i]
+       add     r6,r6,r10                       @ E+=F_40_59(B,C,D)
+       add     r6,r6,r11,ror#2
+       ldr     r9,[r14,#15*4]
+       ldr     r10,[r14,#13*4]
+       ldr     r11,[r14,#7*4]
+       add     r5,r8,r5,ror#2                  @ E+=K_xx_xx
+       ldr     r12,[r14,#2*4]
+       eor     r9,r9,r10
+       eor     r11,r11,r12                     @ 1 cycle stall
+       eor     r10,r3,r4                       @ F_xx_xx
+       mov     r9,r9,ror#31
+       add     r5,r5,r6,ror#27                 @ E+=ROR(A,27)
+       eor     r9,r9,r11,ror#31
+       str     r9,[r14,#-4]!
+       and r10,r7,r10,ror#2                                    @ F_xx_xx
+       and r11,r3,r4                                   @ F_xx_xx
+       add     r5,r5,r9                        @ E+=X[i]
+       add     r5,r5,r10                       @ E+=F_40_59(B,C,D)
+       add     r5,r5,r11,ror#2
+       ldr     r9,[r14,#15*4]
+       ldr     r10,[r14,#13*4]
+       ldr     r11,[r14,#7*4]
+       add     r4,r8,r4,ror#2                  @ E+=K_xx_xx
+       ldr     r12,[r14,#2*4]
+       eor     r9,r9,r10
+       eor     r11,r11,r12                     @ 1 cycle stall
+       eor     r10,r7,r3                       @ F_xx_xx
+       mov     r9,r9,ror#31
+       add     r4,r4,r5,ror#27                 @ E+=ROR(A,27)
+       eor     r9,r9,r11,ror#31
+       str     r9,[r14,#-4]!
+       and r10,r6,r10,ror#2                                    @ F_xx_xx
+       and r11,r7,r3                                   @ F_xx_xx
+       add     r4,r4,r9                        @ E+=X[i]
+       add     r4,r4,r10                       @ E+=F_40_59(B,C,D)
+       add     r4,r4,r11,ror#2
+       ldr     r9,[r14,#15*4]
+       ldr     r10,[r14,#13*4]
+       ldr     r11,[r14,#7*4]
+       add     r3,r8,r3,ror#2                  @ E+=K_xx_xx
+       ldr     r12,[r14,#2*4]
+       eor     r9,r9,r10
+       eor     r11,r11,r12                     @ 1 cycle stall
+       eor     r10,r6,r7                       @ F_xx_xx
+       mov     r9,r9,ror#31
+       add     r3,r3,r4,ror#27                 @ E+=ROR(A,27)
+       eor     r9,r9,r11,ror#31
+       str     r9,[r14,#-4]!
+       and r10,r5,r10,ror#2                                    @ F_xx_xx
+       and r11,r6,r7                                   @ F_xx_xx
+       add     r3,r3,r9                        @ E+=X[i]
+       add     r3,r3,r10                       @ E+=F_40_59(B,C,D)
+       add     r3,r3,r11,ror#2
+       teq     r14,sp
+       bne     .L_40_59                @ [+((12+5)*5+2)*4]
+
+       ldr     r8,.LK_60_79
+       sub     sp,sp,#20*4
+       cmp     sp,#0                   @ set carry to denote 60_79
+       b       .L_20_39_or_60_79       @ [+4], spare 300 bytes
+.L_done:
+       add     sp,sp,#80*4             @ "deallocate" stack frame
+       ldmia   r0,{r8,r9,r10,r11,r12}
+       add     r3,r8,r3
+       add     r4,r9,r4
+       add     r5,r10,r5,ror#2
+       add     r6,r11,r6,ror#2
+       add     r7,r12,r7,ror#2
+       stmia   r0,{r3,r4,r5,r6,r7}
+       teq     r1,r2
+       bne     .Lloop                  @ [+18], total 1307
+
+#if __ARM_ARCH__>=5
+       ldmia   sp!,{r4-r12,pc}
+#else
+       ldmia   sp!,{r4-r12,lr}
+       tst     lr,#1
+       moveq   pc,lr                   @ be binary compatible with V4, yet
+       .word   0xe12fff1e                      @ interoperable with Thumb ISA:-)
+#endif
+.align 2
+.LK_00_19:     .word   0x5a827999
+.LK_20_39:     .word   0x6ed9eba1
+.LK_40_59:     .word   0x8f1bbcdc
+.LK_60_79:     .word   0xca62c1d6
+.size  sha1_block_data_order,.-sha1_block_data_order
+.asciz "SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
+.align 2
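
Note on the file above: sha1_block_data_order(state, data, num) processes num 64-byte blocks; the five chaining words are kept in r3-r7, the message schedule lives on the stack, and .LK_00_19 through .LK_60_79 are the standard SHA-1 round constants. For orientation, a compact portable C sketch of the same block-processing loop follows; sha1_blocks and rol are illustrative names, not the patch's API.

    #include <stdint.h>
    #include <stddef.h>

    static uint32_t rol(uint32_t x, int n) { return (x << n) | (x >> (32 - n)); }

    /* Process 'num' 64-byte blocks, updating the five-word SHA-1 state in h[]. */
    static void sha1_blocks(uint32_t h[5], const uint8_t *p, size_t num)
    {
        while (num--) {
            uint32_t w[80];
            for (int t = 0; t < 16; t++)           /* big-endian message load */
                w[t] = (uint32_t)p[4*t] << 24 | (uint32_t)p[4*t+1] << 16 |
                       (uint32_t)p[4*t+2] << 8 | p[4*t+3];
            for (int t = 16; t < 80; t++)          /* schedule expansion */
                w[t] = rol(w[t-3] ^ w[t-8] ^ w[t-14] ^ w[t-16], 1);

            uint32_t a = h[0], b = h[1], c = h[2], d = h[3], e = h[4];
            for (int t = 0; t < 80; t++) {
                uint32_t f, k;
                if (t < 20)      { f = (b & c) | (~b & d);          k = 0x5a827999; }
                else if (t < 40) { f = b ^ c ^ d;                   k = 0x6ed9eba1; }
                else if (t < 60) { f = (b & c) | (b & d) | (c & d); k = 0x8f1bbcdc; }
                else             { f = b ^ c ^ d;                   k = 0xca62c1d6; }
                uint32_t tmp = rol(a, 5) + f + e + k + w[t];
                e = d; d = c; c = rol(b, 30); b = a; a = tmp;
            }
            h[0] += a; h[1] += b; h[2] += c; h[3] += d; h[4] += e;
            p += 64;
        }
    }

The assembly interleaves the schedule update with the round function (the F_00_19/F_20_39/F_40_59 comments above) and keeps the working variables pre-rotated in registers, but the per-round arithmetic is the same.
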
diff --git a/deps/openssl/asm/arm-elf-gas/sha/sha256-armv4.S b/deps/openssl/asm/arm-elf-gas/sha/sha256-armv4.S
new file mode 100644 (file)
index 0000000..9c20a63
--- /dev/null
@@ -0,0 +1,1517 @@
+#include "arm_arch.h"
+
+.text
+.code  32
+
+.type  K256,%object
+.align 5
+K256:
+.word  0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
+.word  0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
+.word  0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
+.word  0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
+.word  0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
+.word  0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
+.word  0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
+.word  0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
+.word  0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
+.word  0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
+.word  0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
+.word  0xd192e819,0xd6990624,0xf40e3585,0x106aa070
+.word  0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
+.word  0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
+.word  0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
+.word  0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
+.size  K256,.-K256
+
+.global        sha256_block_data_order
+.type  sha256_block_data_order,%function
+sha256_block_data_order:
+       sub     r3,pc,#8                @ sha256_block_data_order
+       add     r2,r1,r2,lsl#6  @ len to point at the end of inp
+       stmdb   sp!,{r0,r1,r2,r4-r11,lr}
+       ldmia   r0,{r4,r5,r6,r7,r8,r9,r10,r11}
+       sub     r14,r3,#256             @ K256
+       sub     sp,sp,#16*4             @ alloca(X[16])
+.Loop:
+#if __ARM_ARCH__>=7
+       ldr     r3,[r1],#4
+#else
+       ldrb    r3,[r1,#3]                      @ 0
+       ldrb    r12,[r1,#2]
+       ldrb    r2,[r1,#1]
+       ldrb    r0,[r1],#4
+       orr     r3,r3,r12,lsl#8
+       orr     r3,r3,r2,lsl#16
+       orr     r3,r3,r0,lsl#24
+#endif
+       mov     r0,r8,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r8,ror#11
+       eor     r2,r9,r10
+#if 0>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 0==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r8,ror#25 @ Sigma1(e)
+       and     r2,r2,r8
+       str     r3,[sp,#0*4]
+       add     r3,r3,r0
+       eor     r2,r2,r10                       @ Ch(e,f,g)
+       add     r3,r3,r11
+       mov     r11,r4,ror#2
+       add     r3,r3,r2
+       eor     r11,r11,r4,ror#13
+       add     r3,r3,r12
+       eor     r11,r11,r4,ror#22               @ Sigma0(a)
+#if 0>=15
+       ldr     r1,[sp,#2*4]            @ from BODY_16_xx
+#endif
+       orr     r0,r4,r5
+       and     r2,r4,r5
+       and     r0,r0,r6
+       add     r11,r11,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r7,r7,r3
+       add     r11,r11,r0
+#if __ARM_ARCH__>=7
+       ldr     r3,[r1],#4
+#else
+       ldrb    r3,[r1,#3]                      @ 1
+       ldrb    r12,[r1,#2]
+       ldrb    r2,[r1,#1]
+       ldrb    r0,[r1],#4
+       orr     r3,r3,r12,lsl#8
+       orr     r3,r3,r2,lsl#16
+       orr     r3,r3,r0,lsl#24
+#endif
+       mov     r0,r7,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r7,ror#11
+       eor     r2,r8,r9
+#if 1>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 1==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r7,ror#25 @ Sigma1(e)
+       and     r2,r2,r7
+       str     r3,[sp,#1*4]
+       add     r3,r3,r0
+       eor     r2,r2,r9                        @ Ch(e,f,g)
+       add     r3,r3,r10
+       mov     r10,r11,ror#2
+       add     r3,r3,r2
+       eor     r10,r10,r11,ror#13
+       add     r3,r3,r12
+       eor     r10,r10,r11,ror#22              @ Sigma0(a)
+#if 1>=15
+       ldr     r1,[sp,#3*4]            @ from BODY_16_xx
+#endif
+       orr     r0,r11,r4
+       and     r2,r11,r4
+       and     r0,r0,r5
+       add     r10,r10,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r6,r6,r3
+       add     r10,r10,r0
+#if __ARM_ARCH__>=7
+       ldr     r3,[r1],#4
+#else
+       ldrb    r3,[r1,#3]                      @ 2
+       ldrb    r12,[r1,#2]
+       ldrb    r2,[r1,#1]
+       ldrb    r0,[r1],#4
+       orr     r3,r3,r12,lsl#8
+       orr     r3,r3,r2,lsl#16
+       orr     r3,r3,r0,lsl#24
+#endif
+       mov     r0,r6,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r6,ror#11
+       eor     r2,r7,r8
+#if 2>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 2==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r6,ror#25 @ Sigma1(e)
+       and     r2,r2,r6
+       str     r3,[sp,#2*4]
+       add     r3,r3,r0
+       eor     r2,r2,r8                        @ Ch(e,f,g)
+       add     r3,r3,r9
+       mov     r9,r10,ror#2
+       add     r3,r3,r2
+       eor     r9,r9,r10,ror#13
+       add     r3,r3,r12
+       eor     r9,r9,r10,ror#22                @ Sigma0(a)
+#if 2>=15
+       ldr     r1,[sp,#4*4]            @ from BODY_16_xx
+#endif
+       orr     r0,r10,r11
+       and     r2,r10,r11
+       and     r0,r0,r4
+       add     r9,r9,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r5,r5,r3
+       add     r9,r9,r0
+#if __ARM_ARCH__>=7
+       ldr     r3,[r1],#4
+#else
+       ldrb    r3,[r1,#3]                      @ 3
+       ldrb    r12,[r1,#2]
+       ldrb    r2,[r1,#1]
+       ldrb    r0,[r1],#4
+       orr     r3,r3,r12,lsl#8
+       orr     r3,r3,r2,lsl#16
+       orr     r3,r3,r0,lsl#24
+#endif
+       mov     r0,r5,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r5,ror#11
+       eor     r2,r6,r7
+#if 3>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 3==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r5,ror#25 @ Sigma1(e)
+       and     r2,r2,r5
+       str     r3,[sp,#3*4]
+       add     r3,r3,r0
+       eor     r2,r2,r7                        @ Ch(e,f,g)
+       add     r3,r3,r8
+       mov     r8,r9,ror#2
+       add     r3,r3,r2
+       eor     r8,r8,r9,ror#13
+       add     r3,r3,r12
+       eor     r8,r8,r9,ror#22         @ Sigma0(a)
+#if 3>=15
+       ldr     r1,[sp,#5*4]            @ from BODY_16_xx
+#endif
+       orr     r0,r9,r10
+       and     r2,r9,r10
+       and     r0,r0,r11
+       add     r8,r8,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r4,r4,r3
+       add     r8,r8,r0
+#if __ARM_ARCH__>=7
+       ldr     r3,[r1],#4
+#else
+       ldrb    r3,[r1,#3]                      @ 4
+       ldrb    r12,[r1,#2]
+       ldrb    r2,[r1,#1]
+       ldrb    r0,[r1],#4
+       orr     r3,r3,r12,lsl#8
+       orr     r3,r3,r2,lsl#16
+       orr     r3,r3,r0,lsl#24
+#endif
+       mov     r0,r4,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r4,ror#11
+       eor     r2,r5,r6
+#if 4>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 4==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r4,ror#25 @ Sigma1(e)
+       and     r2,r2,r4
+       str     r3,[sp,#4*4]
+       add     r3,r3,r0
+       eor     r2,r2,r6                        @ Ch(e,f,g)
+       add     r3,r3,r7
+       mov     r7,r8,ror#2
+       add     r3,r3,r2
+       eor     r7,r7,r8,ror#13
+       add     r3,r3,r12
+       eor     r7,r7,r8,ror#22         @ Sigma0(a)
+#if 4>=15
+       ldr     r1,[sp,#6*4]            @ from BODY_16_xx
+#endif
+       orr     r0,r8,r9
+       and     r2,r8,r9
+       and     r0,r0,r10
+       add     r7,r7,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r11,r11,r3
+       add     r7,r7,r0
+#if __ARM_ARCH__>=7
+       ldr     r3,[r1],#4
+#else
+       ldrb    r3,[r1,#3]                      @ 5
+       ldrb    r12,[r1,#2]
+       ldrb    r2,[r1,#1]
+       ldrb    r0,[r1],#4
+       orr     r3,r3,r12,lsl#8
+       orr     r3,r3,r2,lsl#16
+       orr     r3,r3,r0,lsl#24
+#endif
+       mov     r0,r11,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r11,ror#11
+       eor     r2,r4,r5
+#if 5>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 5==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r11,ror#25        @ Sigma1(e)
+       and     r2,r2,r11
+       str     r3,[sp,#5*4]
+       add     r3,r3,r0
+       eor     r2,r2,r5                        @ Ch(e,f,g)
+       add     r3,r3,r6
+       mov     r6,r7,ror#2
+       add     r3,r3,r2
+       eor     r6,r6,r7,ror#13
+       add     r3,r3,r12
+       eor     r6,r6,r7,ror#22         @ Sigma0(a)
+#if 5>=15
+       ldr     r1,[sp,#7*4]            @ from BODY_16_xx
+#endif
+       orr     r0,r7,r8
+       and     r2,r7,r8
+       and     r0,r0,r9
+       add     r6,r6,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r10,r10,r3
+       add     r6,r6,r0
+#if __ARM_ARCH__>=7
+       ldr     r3,[r1],#4
+#else
+       ldrb    r3,[r1,#3]                      @ 6
+       ldrb    r12,[r1,#2]
+       ldrb    r2,[r1,#1]
+       ldrb    r0,[r1],#4
+       orr     r3,r3,r12,lsl#8
+       orr     r3,r3,r2,lsl#16
+       orr     r3,r3,r0,lsl#24
+#endif
+       mov     r0,r10,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r10,ror#11
+       eor     r2,r11,r4
+#if 6>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 6==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r10,ror#25        @ Sigma1(e)
+       and     r2,r2,r10
+       str     r3,[sp,#6*4]
+       add     r3,r3,r0
+       eor     r2,r2,r4                        @ Ch(e,f,g)
+       add     r3,r3,r5
+       mov     r5,r6,ror#2
+       add     r3,r3,r2
+       eor     r5,r5,r6,ror#13
+       add     r3,r3,r12
+       eor     r5,r5,r6,ror#22         @ Sigma0(a)
+#if 6>=15
+       ldr     r1,[sp,#8*4]            @ from BODY_16_xx
+#endif
+       orr     r0,r6,r7
+       and     r2,r6,r7
+       and     r0,r0,r8
+       add     r5,r5,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r9,r9,r3
+       add     r5,r5,r0
+#if __ARM_ARCH__>=7
+       ldr     r3,[r1],#4
+#else
+       ldrb    r3,[r1,#3]                      @ 7
+       ldrb    r12,[r1,#2]
+       ldrb    r2,[r1,#1]
+       ldrb    r0,[r1],#4
+       orr     r3,r3,r12,lsl#8
+       orr     r3,r3,r2,lsl#16
+       orr     r3,r3,r0,lsl#24
+#endif
+       mov     r0,r9,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r9,ror#11
+       eor     r2,r10,r11
+#if 7>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 7==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r9,ror#25 @ Sigma1(e)
+       and     r2,r2,r9
+       str     r3,[sp,#7*4]
+       add     r3,r3,r0
+       eor     r2,r2,r11                       @ Ch(e,f,g)
+       add     r3,r3,r4
+       mov     r4,r5,ror#2
+       add     r3,r3,r2
+       eor     r4,r4,r5,ror#13
+       add     r3,r3,r12
+       eor     r4,r4,r5,ror#22         @ Sigma0(a)
+#if 7>=15
+       ldr     r1,[sp,#9*4]            @ from BODY_16_xx
+#endif
+       orr     r0,r5,r6
+       and     r2,r5,r6
+       and     r0,r0,r7
+       add     r4,r4,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r8,r8,r3
+       add     r4,r4,r0
+#if __ARM_ARCH__>=7
+       ldr     r3,[r1],#4
+#else
+       ldrb    r3,[r1,#3]                      @ 8
+       ldrb    r12,[r1,#2]
+       ldrb    r2,[r1,#1]
+       ldrb    r0,[r1],#4
+       orr     r3,r3,r12,lsl#8
+       orr     r3,r3,r2,lsl#16
+       orr     r3,r3,r0,lsl#24
+#endif
+       mov     r0,r8,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r8,ror#11
+       eor     r2,r9,r10
+#if 8>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 8==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r8,ror#25 @ Sigma1(e)
+       and     r2,r2,r8
+       str     r3,[sp,#8*4]
+       add     r3,r3,r0
+       eor     r2,r2,r10                       @ Ch(e,f,g)
+       add     r3,r3,r11
+       mov     r11,r4,ror#2
+       add     r3,r3,r2
+       eor     r11,r11,r4,ror#13
+       add     r3,r3,r12
+       eor     r11,r11,r4,ror#22               @ Sigma0(a)
+#if 8>=15
+       ldr     r1,[sp,#10*4]           @ from BODY_16_xx
+#endif
+       orr     r0,r4,r5
+       and     r2,r4,r5
+       and     r0,r0,r6
+       add     r11,r11,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r7,r7,r3
+       add     r11,r11,r0
+#if __ARM_ARCH__>=7
+       ldr     r3,[r1],#4
+#else
+       ldrb    r3,[r1,#3]                      @ 9
+       ldrb    r12,[r1,#2]
+       ldrb    r2,[r1,#1]
+       ldrb    r0,[r1],#4
+       orr     r3,r3,r12,lsl#8
+       orr     r3,r3,r2,lsl#16
+       orr     r3,r3,r0,lsl#24
+#endif
+       mov     r0,r7,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r7,ror#11
+       eor     r2,r8,r9
+#if 9>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 9==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r7,ror#25 @ Sigma1(e)
+       and     r2,r2,r7
+       str     r3,[sp,#9*4]
+       add     r3,r3,r0
+       eor     r2,r2,r9                        @ Ch(e,f,g)
+       add     r3,r3,r10
+       mov     r10,r11,ror#2
+       add     r3,r3,r2
+       eor     r10,r10,r11,ror#13
+       add     r3,r3,r12
+       eor     r10,r10,r11,ror#22              @ Sigma0(a)
+#if 9>=15
+       ldr     r1,[sp,#11*4]           @ from BODY_16_xx
+#endif
+       orr     r0,r11,r4
+       and     r2,r11,r4
+       and     r0,r0,r5
+       add     r10,r10,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r6,r6,r3
+       add     r10,r10,r0
+#if __ARM_ARCH__>=7
+       ldr     r3,[r1],#4
+#else
+       ldrb    r3,[r1,#3]                      @ 10
+       ldrb    r12,[r1,#2]
+       ldrb    r2,[r1,#1]
+       ldrb    r0,[r1],#4
+       orr     r3,r3,r12,lsl#8
+       orr     r3,r3,r2,lsl#16
+       orr     r3,r3,r0,lsl#24
+#endif
+       mov     r0,r6,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r6,ror#11
+       eor     r2,r7,r8
+#if 10>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 10==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r6,ror#25 @ Sigma1(e)
+       and     r2,r2,r6
+       str     r3,[sp,#10*4]
+       add     r3,r3,r0
+       eor     r2,r2,r8                        @ Ch(e,f,g)
+       add     r3,r3,r9
+       mov     r9,r10,ror#2
+       add     r3,r3,r2
+       eor     r9,r9,r10,ror#13
+       add     r3,r3,r12
+       eor     r9,r9,r10,ror#22                @ Sigma0(a)
+#if 10>=15
+       ldr     r1,[sp,#12*4]           @ from BODY_16_xx
+#endif
+       orr     r0,r10,r11
+       and     r2,r10,r11
+       and     r0,r0,r4
+       add     r9,r9,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r5,r5,r3
+       add     r9,r9,r0
+#if __ARM_ARCH__>=7
+       ldr     r3,[r1],#4
+#else
+       ldrb    r3,[r1,#3]                      @ 11
+       ldrb    r12,[r1,#2]
+       ldrb    r2,[r1,#1]
+       ldrb    r0,[r1],#4
+       orr     r3,r3,r12,lsl#8
+       orr     r3,r3,r2,lsl#16
+       orr     r3,r3,r0,lsl#24
+#endif
+       mov     r0,r5,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r5,ror#11
+       eor     r2,r6,r7
+#if 11>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 11==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r5,ror#25 @ Sigma1(e)
+       and     r2,r2,r5
+       str     r3,[sp,#11*4]
+       add     r3,r3,r0
+       eor     r2,r2,r7                        @ Ch(e,f,g)
+       add     r3,r3,r8
+       mov     r8,r9,ror#2
+       add     r3,r3,r2
+       eor     r8,r8,r9,ror#13
+       add     r3,r3,r12
+       eor     r8,r8,r9,ror#22         @ Sigma0(a)
+#if 11>=15
+       ldr     r1,[sp,#13*4]           @ from BODY_16_xx
+#endif
+       orr     r0,r9,r10
+       and     r2,r9,r10
+       and     r0,r0,r11
+       add     r8,r8,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r4,r4,r3
+       add     r8,r8,r0
+#if __ARM_ARCH__>=7
+       ldr     r3,[r1],#4
+#else
+       ldrb    r3,[r1,#3]                      @ 12
+       ldrb    r12,[r1,#2]
+       ldrb    r2,[r1,#1]
+       ldrb    r0,[r1],#4
+       orr     r3,r3,r12,lsl#8
+       orr     r3,r3,r2,lsl#16
+       orr     r3,r3,r0,lsl#24
+#endif
+       mov     r0,r4,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r4,ror#11
+       eor     r2,r5,r6
+#if 12>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 12==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r4,ror#25 @ Sigma1(e)
+       and     r2,r2,r4
+       str     r3,[sp,#12*4]
+       add     r3,r3,r0
+       eor     r2,r2,r6                        @ Ch(e,f,g)
+       add     r3,r3,r7
+       mov     r7,r8,ror#2
+       add     r3,r3,r2
+       eor     r7,r7,r8,ror#13
+       add     r3,r3,r12
+       eor     r7,r7,r8,ror#22         @ Sigma0(a)
+#if 12>=15
+       ldr     r1,[sp,#14*4]           @ from BODY_16_xx
+#endif
+       orr     r0,r8,r9
+       and     r2,r8,r9
+       and     r0,r0,r10
+       add     r7,r7,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r11,r11,r3
+       add     r7,r7,r0
+#if __ARM_ARCH__>=7
+       ldr     r3,[r1],#4
+#else
+       ldrb    r3,[r1,#3]                      @ 13
+       ldrb    r12,[r1,#2]
+       ldrb    r2,[r1,#1]
+       ldrb    r0,[r1],#4
+       orr     r3,r3,r12,lsl#8
+       orr     r3,r3,r2,lsl#16
+       orr     r3,r3,r0,lsl#24
+#endif
+       mov     r0,r11,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r11,ror#11
+       eor     r2,r4,r5
+#if 13>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 13==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r11,ror#25        @ Sigma1(e)
+       and     r2,r2,r11
+       str     r3,[sp,#13*4]
+       add     r3,r3,r0
+       eor     r2,r2,r5                        @ Ch(e,f,g)
+       add     r3,r3,r6
+       mov     r6,r7,ror#2
+       add     r3,r3,r2
+       eor     r6,r6,r7,ror#13
+       add     r3,r3,r12
+       eor     r6,r6,r7,ror#22         @ Sigma0(a)
+#if 13>=15
+       ldr     r1,[sp,#15*4]           @ from BODY_16_xx
+#endif
+       orr     r0,r7,r8
+       and     r2,r7,r8
+       and     r0,r0,r9
+       add     r6,r6,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r10,r10,r3
+       add     r6,r6,r0
+#if __ARM_ARCH__>=7
+       ldr     r3,[r1],#4
+#else
+       ldrb    r3,[r1,#3]                      @ 14
+       ldrb    r12,[r1,#2]
+       ldrb    r2,[r1,#1]
+       ldrb    r0,[r1],#4
+       orr     r3,r3,r12,lsl#8
+       orr     r3,r3,r2,lsl#16
+       orr     r3,r3,r0,lsl#24
+#endif
+       mov     r0,r10,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r10,ror#11
+       eor     r2,r11,r4
+#if 14>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 14==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r10,ror#25        @ Sigma1(e)
+       and     r2,r2,r10
+       str     r3,[sp,#14*4]
+       add     r3,r3,r0
+       eor     r2,r2,r4                        @ Ch(e,f,g)
+       add     r3,r3,r5
+       mov     r5,r6,ror#2
+       add     r3,r3,r2
+       eor     r5,r5,r6,ror#13
+       add     r3,r3,r12
+       eor     r5,r5,r6,ror#22         @ Sigma0(a)
+#if 14>=15
+       ldr     r1,[sp,#0*4]            @ from BODY_16_xx
+#endif
+       orr     r0,r6,r7
+       and     r2,r6,r7
+       and     r0,r0,r8
+       add     r5,r5,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r9,r9,r3
+       add     r5,r5,r0
+#if __ARM_ARCH__>=7
+       ldr     r3,[r1],#4
+#else
+       ldrb    r3,[r1,#3]                      @ 15
+       ldrb    r12,[r1,#2]
+       ldrb    r2,[r1,#1]
+       ldrb    r0,[r1],#4
+       orr     r3,r3,r12,lsl#8
+       orr     r3,r3,r2,lsl#16
+       orr     r3,r3,r0,lsl#24
+#endif
+       mov     r0,r9,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r9,ror#11
+       eor     r2,r10,r11
+#if 15>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 15==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r9,ror#25 @ Sigma1(e)
+       and     r2,r2,r9
+       str     r3,[sp,#15*4]
+       add     r3,r3,r0
+       eor     r2,r2,r11                       @ Ch(e,f,g)
+       add     r3,r3,r4
+       mov     r4,r5,ror#2
+       add     r3,r3,r2
+       eor     r4,r4,r5,ror#13
+       add     r3,r3,r12
+       eor     r4,r4,r5,ror#22         @ Sigma0(a)
+#if 15>=15
+       ldr     r1,[sp,#1*4]            @ from BODY_16_xx
+#endif
+       orr     r0,r5,r6
+       and     r2,r5,r6
+       and     r0,r0,r7
+       add     r4,r4,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r8,r8,r3
+       add     r4,r4,r0
+.Lrounds_16_xx:
+       @ ldr   r1,[sp,#1*4]            @ 16
+       ldr     r12,[sp,#14*4]
+       mov     r0,r1,ror#7
+       ldr     r3,[sp,#0*4]
+       eor     r0,r0,r1,ror#18
+       ldr     r2,[sp,#9*4]
+       eor     r0,r0,r1,lsr#3  @ sigma0(X[i+1])
+       mov     r1,r12,ror#17
+       add     r3,r3,r0
+       eor     r1,r1,r12,ror#19
+       add     r3,r3,r2
+       eor     r1,r1,r12,lsr#10        @ sigma1(X[i+14])
+       @ add   r3,r3,r1
+       mov     r0,r8,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r8,ror#11
+       eor     r2,r9,r10
+#if 16>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 16==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r8,ror#25 @ Sigma1(e)
+       and     r2,r2,r8
+       str     r3,[sp,#0*4]
+       add     r3,r3,r0
+       eor     r2,r2,r10                       @ Ch(e,f,g)
+       add     r3,r3,r11
+       mov     r11,r4,ror#2
+       add     r3,r3,r2
+       eor     r11,r11,r4,ror#13
+       add     r3,r3,r12
+       eor     r11,r11,r4,ror#22               @ Sigma0(a)
+#if 16>=15
+       ldr     r1,[sp,#2*4]            @ from BODY_16_xx
+#endif
+       orr     r0,r4,r5
+       and     r2,r4,r5
+       and     r0,r0,r6
+       add     r11,r11,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r7,r7,r3
+       add     r11,r11,r0
+       @ ldr   r1,[sp,#2*4]            @ 17
+       ldr     r12,[sp,#15*4]
+       mov     r0,r1,ror#7
+       ldr     r3,[sp,#1*4]
+       eor     r0,r0,r1,ror#18
+       ldr     r2,[sp,#10*4]
+       eor     r0,r0,r1,lsr#3  @ sigma0(X[i+1])
+       mov     r1,r12,ror#17
+       add     r3,r3,r0
+       eor     r1,r1,r12,ror#19
+       add     r3,r3,r2
+       eor     r1,r1,r12,lsr#10        @ sigma1(X[i+14])
+       @ add   r3,r3,r1
+       mov     r0,r7,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r7,ror#11
+       eor     r2,r8,r9
+#if 17>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 17==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r7,ror#25 @ Sigma1(e)
+       and     r2,r2,r7
+       str     r3,[sp,#1*4]
+       add     r3,r3,r0
+       eor     r2,r2,r9                        @ Ch(e,f,g)
+       add     r3,r3,r10
+       mov     r10,r11,ror#2
+       add     r3,r3,r2
+       eor     r10,r10,r11,ror#13
+       add     r3,r3,r12
+       eor     r10,r10,r11,ror#22              @ Sigma0(a)
+#if 17>=15
+       ldr     r1,[sp,#3*4]            @ from BODY_16_xx
+#endif
+       orr     r0,r11,r4
+       and     r2,r11,r4
+       and     r0,r0,r5
+       add     r10,r10,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r6,r6,r3
+       add     r10,r10,r0
+       @ ldr   r1,[sp,#3*4]            @ 18
+       ldr     r12,[sp,#0*4]
+       mov     r0,r1,ror#7
+       ldr     r3,[sp,#2*4]
+       eor     r0,r0,r1,ror#18
+       ldr     r2,[sp,#11*4]
+       eor     r0,r0,r1,lsr#3  @ sigma0(X[i+1])
+       mov     r1,r12,ror#17
+       add     r3,r3,r0
+       eor     r1,r1,r12,ror#19
+       add     r3,r3,r2
+       eor     r1,r1,r12,lsr#10        @ sigma1(X[i+14])
+       @ add   r3,r3,r1
+       mov     r0,r6,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r6,ror#11
+       eor     r2,r7,r8
+#if 18>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 18==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r6,ror#25 @ Sigma1(e)
+       and     r2,r2,r6
+       str     r3,[sp,#2*4]
+       add     r3,r3,r0
+       eor     r2,r2,r8                        @ Ch(e,f,g)
+       add     r3,r3,r9
+       mov     r9,r10,ror#2
+       add     r3,r3,r2
+       eor     r9,r9,r10,ror#13
+       add     r3,r3,r12
+       eor     r9,r9,r10,ror#22                @ Sigma0(a)
+#if 18>=15
+       ldr     r1,[sp,#4*4]            @ from BODY_16_xx
+#endif
+       orr     r0,r10,r11
+       and     r2,r10,r11
+       and     r0,r0,r4
+       add     r9,r9,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r5,r5,r3
+       add     r9,r9,r0
+       @ ldr   r1,[sp,#4*4]            @ 19
+       ldr     r12,[sp,#1*4]
+       mov     r0,r1,ror#7
+       ldr     r3,[sp,#3*4]
+       eor     r0,r0,r1,ror#18
+       ldr     r2,[sp,#12*4]
+       eor     r0,r0,r1,lsr#3  @ sigma0(X[i+1])
+       mov     r1,r12,ror#17
+       add     r3,r3,r0
+       eor     r1,r1,r12,ror#19
+       add     r3,r3,r2
+       eor     r1,r1,r12,lsr#10        @ sigma1(X[i+14])
+       @ add   r3,r3,r1
+       mov     r0,r5,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r5,ror#11
+       eor     r2,r6,r7
+#if 19>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 19==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r5,ror#25 @ Sigma1(e)
+       and     r2,r2,r5
+       str     r3,[sp,#3*4]
+       add     r3,r3,r0
+       eor     r2,r2,r7                        @ Ch(e,f,g)
+       add     r3,r3,r8
+       mov     r8,r9,ror#2
+       add     r3,r3,r2
+       eor     r8,r8,r9,ror#13
+       add     r3,r3,r12
+       eor     r8,r8,r9,ror#22         @ Sigma0(a)
+#if 19>=15
+       ldr     r1,[sp,#5*4]            @ from BODY_16_xx
+#endif
+       orr     r0,r9,r10
+       and     r2,r9,r10
+       and     r0,r0,r11
+       add     r8,r8,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r4,r4,r3
+       add     r8,r8,r0
+       @ ldr   r1,[sp,#5*4]            @ 20
+       ldr     r12,[sp,#2*4]
+       mov     r0,r1,ror#7
+       ldr     r3,[sp,#4*4]
+       eor     r0,r0,r1,ror#18
+       ldr     r2,[sp,#13*4]
+       eor     r0,r0,r1,lsr#3  @ sigma0(X[i+1])
+       mov     r1,r12,ror#17
+       add     r3,r3,r0
+       eor     r1,r1,r12,ror#19
+       add     r3,r3,r2
+       eor     r1,r1,r12,lsr#10        @ sigma1(X[i+14])
+       @ add   r3,r3,r1
+       mov     r0,r4,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r4,ror#11
+       eor     r2,r5,r6
+#if 20>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 20==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r4,ror#25 @ Sigma1(e)
+       and     r2,r2,r4
+       str     r3,[sp,#4*4]
+       add     r3,r3,r0
+       eor     r2,r2,r6                        @ Ch(e,f,g)
+       add     r3,r3,r7
+       mov     r7,r8,ror#2
+       add     r3,r3,r2
+       eor     r7,r7,r8,ror#13
+       add     r3,r3,r12
+       eor     r7,r7,r8,ror#22         @ Sigma0(a)
+#if 20>=15
+       ldr     r1,[sp,#6*4]            @ from BODY_16_xx
+#endif
+       orr     r0,r8,r9
+       and     r2,r8,r9
+       and     r0,r0,r10
+       add     r7,r7,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r11,r11,r3
+       add     r7,r7,r0
+       @ ldr   r1,[sp,#6*4]            @ 21
+       ldr     r12,[sp,#3*4]
+       mov     r0,r1,ror#7
+       ldr     r3,[sp,#5*4]
+       eor     r0,r0,r1,ror#18
+       ldr     r2,[sp,#14*4]
+       eor     r0,r0,r1,lsr#3  @ sigma0(X[i+1])
+       mov     r1,r12,ror#17
+       add     r3,r3,r0
+       eor     r1,r1,r12,ror#19
+       add     r3,r3,r2
+       eor     r1,r1,r12,lsr#10        @ sigma1(X[i+14])
+       @ add   r3,r3,r1
+       mov     r0,r11,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r11,ror#11
+       eor     r2,r4,r5
+#if 21>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 21==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r11,ror#25        @ Sigma1(e)
+       and     r2,r2,r11
+       str     r3,[sp,#5*4]
+       add     r3,r3,r0
+       eor     r2,r2,r5                        @ Ch(e,f,g)
+       add     r3,r3,r6
+       mov     r6,r7,ror#2
+       add     r3,r3,r2
+       eor     r6,r6,r7,ror#13
+       add     r3,r3,r12
+       eor     r6,r6,r7,ror#22         @ Sigma0(a)
+#if 21>=15
+       ldr     r1,[sp,#7*4]            @ from BODY_16_xx
+#endif
+       orr     r0,r7,r8
+       and     r2,r7,r8
+       and     r0,r0,r9
+       add     r6,r6,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r10,r10,r3
+       add     r6,r6,r0
+       @ ldr   r1,[sp,#7*4]            @ 22
+       ldr     r12,[sp,#4*4]
+       mov     r0,r1,ror#7
+       ldr     r3,[sp,#6*4]
+       eor     r0,r0,r1,ror#18
+       ldr     r2,[sp,#15*4]
+       eor     r0,r0,r1,lsr#3  @ sigma0(X[i+1])
+       mov     r1,r12,ror#17
+       add     r3,r3,r0
+       eor     r1,r1,r12,ror#19
+       add     r3,r3,r2
+       eor     r1,r1,r12,lsr#10        @ sigma1(X[i+14])
+       @ add   r3,r3,r1
+       mov     r0,r10,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r10,ror#11
+       eor     r2,r11,r4
+#if 22>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 22==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r10,ror#25        @ Sigma1(e)
+       and     r2,r2,r10
+       str     r3,[sp,#6*4]
+       add     r3,r3,r0
+       eor     r2,r2,r4                        @ Ch(e,f,g)
+       add     r3,r3,r5
+       mov     r5,r6,ror#2
+       add     r3,r3,r2
+       eor     r5,r5,r6,ror#13
+       add     r3,r3,r12
+       eor     r5,r5,r6,ror#22         @ Sigma0(a)
+#if 22>=15
+       ldr     r1,[sp,#8*4]            @ from BODY_16_xx
+#endif
+       orr     r0,r6,r7
+       and     r2,r6,r7
+       and     r0,r0,r8
+       add     r5,r5,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r9,r9,r3
+       add     r5,r5,r0
+       @ ldr   r1,[sp,#8*4]            @ 23
+       ldr     r12,[sp,#5*4]
+       mov     r0,r1,ror#7
+       ldr     r3,[sp,#7*4]
+       eor     r0,r0,r1,ror#18
+       ldr     r2,[sp,#0*4]
+       eor     r0,r0,r1,lsr#3  @ sigma0(X[i+1])
+       mov     r1,r12,ror#17
+       add     r3,r3,r0
+       eor     r1,r1,r12,ror#19
+       add     r3,r3,r2
+       eor     r1,r1,r12,lsr#10        @ sigma1(X[i+14])
+       @ add   r3,r3,r1
+       mov     r0,r9,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r9,ror#11
+       eor     r2,r10,r11
+#if 23>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 23==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r9,ror#25 @ Sigma1(e)
+       and     r2,r2,r9
+       str     r3,[sp,#7*4]
+       add     r3,r3,r0
+       eor     r2,r2,r11                       @ Ch(e,f,g)
+       add     r3,r3,r4
+       mov     r4,r5,ror#2
+       add     r3,r3,r2
+       eor     r4,r4,r5,ror#13
+       add     r3,r3,r12
+       eor     r4,r4,r5,ror#22         @ Sigma0(a)
+#if 23>=15
+       ldr     r1,[sp,#9*4]            @ from BODY_16_xx
+#endif
+       orr     r0,r5,r6
+       and     r2,r5,r6
+       and     r0,r0,r7
+       add     r4,r4,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r8,r8,r3
+       add     r4,r4,r0
+       @ ldr   r1,[sp,#9*4]            @ 24
+       ldr     r12,[sp,#6*4]
+       mov     r0,r1,ror#7
+       ldr     r3,[sp,#8*4]
+       eor     r0,r0,r1,ror#18
+       ldr     r2,[sp,#1*4]
+       eor     r0,r0,r1,lsr#3  @ sigma0(X[i+1])
+       mov     r1,r12,ror#17
+       add     r3,r3,r0
+       eor     r1,r1,r12,ror#19
+       add     r3,r3,r2
+       eor     r1,r1,r12,lsr#10        @ sigma1(X[i+14])
+       @ add   r3,r3,r1
+       mov     r0,r8,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r8,ror#11
+       eor     r2,r9,r10
+#if 24>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 24==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r8,ror#25 @ Sigma1(e)
+       and     r2,r2,r8
+       str     r3,[sp,#8*4]
+       add     r3,r3,r0
+       eor     r2,r2,r10                       @ Ch(e,f,g)
+       add     r3,r3,r11
+       mov     r11,r4,ror#2
+       add     r3,r3,r2
+       eor     r11,r11,r4,ror#13
+       add     r3,r3,r12
+       eor     r11,r11,r4,ror#22               @ Sigma0(a)
+#if 24>=15
+       ldr     r1,[sp,#10*4]           @ from BODY_16_xx
+#endif
+       orr     r0,r4,r5
+       and     r2,r4,r5
+       and     r0,r0,r6
+       add     r11,r11,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r7,r7,r3
+       add     r11,r11,r0
+       @ ldr   r1,[sp,#10*4]           @ 25
+       ldr     r12,[sp,#7*4]
+       mov     r0,r1,ror#7
+       ldr     r3,[sp,#9*4]
+       eor     r0,r0,r1,ror#18
+       ldr     r2,[sp,#2*4]
+       eor     r0,r0,r1,lsr#3  @ sigma0(X[i+1])
+       mov     r1,r12,ror#17
+       add     r3,r3,r0
+       eor     r1,r1,r12,ror#19
+       add     r3,r3,r2
+       eor     r1,r1,r12,lsr#10        @ sigma1(X[i+14])
+       @ add   r3,r3,r1
+       mov     r0,r7,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r7,ror#11
+       eor     r2,r8,r9
+#if 25>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 25==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r7,ror#25 @ Sigma1(e)
+       and     r2,r2,r7
+       str     r3,[sp,#9*4]
+       add     r3,r3,r0
+       eor     r2,r2,r9                        @ Ch(e,f,g)
+       add     r3,r3,r10
+       mov     r10,r11,ror#2
+       add     r3,r3,r2
+       eor     r10,r10,r11,ror#13
+       add     r3,r3,r12
+       eor     r10,r10,r11,ror#22              @ Sigma0(a)
+#if 25>=15
+       ldr     r1,[sp,#11*4]           @ from BODY_16_xx
+#endif
+       orr     r0,r11,r4
+       and     r2,r11,r4
+       and     r0,r0,r5
+       add     r10,r10,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r6,r6,r3
+       add     r10,r10,r0
+       @ ldr   r1,[sp,#11*4]           @ 26
+       ldr     r12,[sp,#8*4]
+       mov     r0,r1,ror#7
+       ldr     r3,[sp,#10*4]
+       eor     r0,r0,r1,ror#18
+       ldr     r2,[sp,#3*4]
+       eor     r0,r0,r1,lsr#3  @ sigma0(X[i+1])
+       mov     r1,r12,ror#17
+       add     r3,r3,r0
+       eor     r1,r1,r12,ror#19
+       add     r3,r3,r2
+       eor     r1,r1,r12,lsr#10        @ sigma1(X[i+14])
+       @ add   r3,r3,r1
+       mov     r0,r6,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r6,ror#11
+       eor     r2,r7,r8
+#if 26>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 26==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r6,ror#25 @ Sigma1(e)
+       and     r2,r2,r6
+       str     r3,[sp,#10*4]
+       add     r3,r3,r0
+       eor     r2,r2,r8                        @ Ch(e,f,g)
+       add     r3,r3,r9
+       mov     r9,r10,ror#2
+       add     r3,r3,r2
+       eor     r9,r9,r10,ror#13
+       add     r3,r3,r12
+       eor     r9,r9,r10,ror#22                @ Sigma0(a)
+#if 26>=15
+       ldr     r1,[sp,#12*4]           @ from BODY_16_xx
+#endif
+       orr     r0,r10,r11
+       and     r2,r10,r11
+       and     r0,r0,r4
+       add     r9,r9,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r5,r5,r3
+       add     r9,r9,r0
+       @ ldr   r1,[sp,#12*4]           @ 27
+       ldr     r12,[sp,#9*4]
+       mov     r0,r1,ror#7
+       ldr     r3,[sp,#11*4]
+       eor     r0,r0,r1,ror#18
+       ldr     r2,[sp,#4*4]
+       eor     r0,r0,r1,lsr#3  @ sigma0(X[i+1])
+       mov     r1,r12,ror#17
+       add     r3,r3,r0
+       eor     r1,r1,r12,ror#19
+       add     r3,r3,r2
+       eor     r1,r1,r12,lsr#10        @ sigma1(X[i+14])
+       @ add   r3,r3,r1
+       mov     r0,r5,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r5,ror#11
+       eor     r2,r6,r7
+#if 27>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 27==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r5,ror#25 @ Sigma1(e)
+       and     r2,r2,r5
+       str     r3,[sp,#11*4]
+       add     r3,r3,r0
+       eor     r2,r2,r7                        @ Ch(e,f,g)
+       add     r3,r3,r8
+       mov     r8,r9,ror#2
+       add     r3,r3,r2
+       eor     r8,r8,r9,ror#13
+       add     r3,r3,r12
+       eor     r8,r8,r9,ror#22         @ Sigma0(a)
+#if 27>=15
+       ldr     r1,[sp,#13*4]           @ from BODY_16_xx
+#endif
+       orr     r0,r9,r10
+       and     r2,r9,r10
+       and     r0,r0,r11
+       add     r8,r8,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r4,r4,r3
+       add     r8,r8,r0
+       @ ldr   r1,[sp,#13*4]           @ 28
+       ldr     r12,[sp,#10*4]
+       mov     r0,r1,ror#7
+       ldr     r3,[sp,#12*4]
+       eor     r0,r0,r1,ror#18
+       ldr     r2,[sp,#5*4]
+       eor     r0,r0,r1,lsr#3  @ sigma0(X[i+1])
+       mov     r1,r12,ror#17
+       add     r3,r3,r0
+       eor     r1,r1,r12,ror#19
+       add     r3,r3,r2
+       eor     r1,r1,r12,lsr#10        @ sigma1(X[i+14])
+       @ add   r3,r3,r1
+       mov     r0,r4,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r4,ror#11
+       eor     r2,r5,r6
+#if 28>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 28==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r4,ror#25 @ Sigma1(e)
+       and     r2,r2,r4
+       str     r3,[sp,#12*4]
+       add     r3,r3,r0
+       eor     r2,r2,r6                        @ Ch(e,f,g)
+       add     r3,r3,r7
+       mov     r7,r8,ror#2
+       add     r3,r3,r2
+       eor     r7,r7,r8,ror#13
+       add     r3,r3,r12
+       eor     r7,r7,r8,ror#22         @ Sigma0(a)
+#if 28>=15
+       ldr     r1,[sp,#14*4]           @ from BODY_16_xx
+#endif
+       orr     r0,r8,r9
+       and     r2,r8,r9
+       and     r0,r0,r10
+       add     r7,r7,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r11,r11,r3
+       add     r7,r7,r0
+       @ ldr   r1,[sp,#14*4]           @ 29
+       ldr     r12,[sp,#11*4]
+       mov     r0,r1,ror#7
+       ldr     r3,[sp,#13*4]
+       eor     r0,r0,r1,ror#18
+       ldr     r2,[sp,#6*4]
+       eor     r0,r0,r1,lsr#3  @ sigma0(X[i+1])
+       mov     r1,r12,ror#17
+       add     r3,r3,r0
+       eor     r1,r1,r12,ror#19
+       add     r3,r3,r2
+       eor     r1,r1,r12,lsr#10        @ sigma1(X[i+14])
+       @ add   r3,r3,r1
+       mov     r0,r11,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r11,ror#11
+       eor     r2,r4,r5
+#if 29>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 29==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r11,ror#25        @ Sigma1(e)
+       and     r2,r2,r11
+       str     r3,[sp,#13*4]
+       add     r3,r3,r0
+       eor     r2,r2,r5                        @ Ch(e,f,g)
+       add     r3,r3,r6
+       mov     r6,r7,ror#2
+       add     r3,r3,r2
+       eor     r6,r6,r7,ror#13
+       add     r3,r3,r12
+       eor     r6,r6,r7,ror#22         @ Sigma0(a)
+#if 29>=15
+       ldr     r1,[sp,#15*4]           @ from BODY_16_xx
+#endif
+       orr     r0,r7,r8
+       and     r2,r7,r8
+       and     r0,r0,r9
+       add     r6,r6,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r10,r10,r3
+       add     r6,r6,r0
+       @ ldr   r1,[sp,#15*4]           @ 30
+       ldr     r12,[sp,#12*4]
+       mov     r0,r1,ror#7
+       ldr     r3,[sp,#14*4]
+       eor     r0,r0,r1,ror#18
+       ldr     r2,[sp,#7*4]
+       eor     r0,r0,r1,lsr#3  @ sigma0(X[i+1])
+       mov     r1,r12,ror#17
+       add     r3,r3,r0
+       eor     r1,r1,r12,ror#19
+       add     r3,r3,r2
+       eor     r1,r1,r12,lsr#10        @ sigma1(X[i+14])
+       @ add   r3,r3,r1
+       mov     r0,r10,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r10,ror#11
+       eor     r2,r11,r4
+#if 30>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 30==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r10,ror#25        @ Sigma1(e)
+       and     r2,r2,r10
+       str     r3,[sp,#14*4]
+       add     r3,r3,r0
+       eor     r2,r2,r4                        @ Ch(e,f,g)
+       add     r3,r3,r5
+       mov     r5,r6,ror#2
+       add     r3,r3,r2
+       eor     r5,r5,r6,ror#13
+       add     r3,r3,r12
+       eor     r5,r5,r6,ror#22         @ Sigma0(a)
+#if 30>=15
+       ldr     r1,[sp,#0*4]            @ from BODY_16_xx
+#endif
+       orr     r0,r6,r7
+       and     r2,r6,r7
+       and     r0,r0,r8
+       add     r5,r5,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r9,r9,r3
+       add     r5,r5,r0
+       @ ldr   r1,[sp,#0*4]            @ 31
+       ldr     r12,[sp,#13*4]
+       mov     r0,r1,ror#7
+       ldr     r3,[sp,#15*4]
+       eor     r0,r0,r1,ror#18
+       ldr     r2,[sp,#8*4]
+       eor     r0,r0,r1,lsr#3  @ sigma0(X[i+1])
+       mov     r1,r12,ror#17
+       add     r3,r3,r0
+       eor     r1,r1,r12,ror#19
+       add     r3,r3,r2
+       eor     r1,r1,r12,lsr#10        @ sigma1(X[i+14])
+       @ add   r3,r3,r1
+       mov     r0,r9,ror#6
+       ldr     r12,[r14],#4                    @ *K256++
+       eor     r0,r0,r9,ror#11
+       eor     r2,r10,r11
+#if 31>=16
+       add     r3,r3,r1                        @ from BODY_16_xx
+#elif __ARM_ARCH__>=7 && defined(__ARMEL__)
+       rev     r3,r3
+#endif
+#if 31==15
+       str     r1,[sp,#17*4]                   @ leave room for r1
+#endif
+       eor     r0,r0,r9,ror#25 @ Sigma1(e)
+       and     r2,r2,r9
+       str     r3,[sp,#15*4]
+       add     r3,r3,r0
+       eor     r2,r2,r11                       @ Ch(e,f,g)
+       add     r3,r3,r4
+       mov     r4,r5,ror#2
+       add     r3,r3,r2
+       eor     r4,r4,r5,ror#13
+       add     r3,r3,r12
+       eor     r4,r4,r5,ror#22         @ Sigma0(a)
+#if 31>=15
+       ldr     r1,[sp,#1*4]            @ from BODY_16_xx
+#endif
+       orr     r0,r5,r6
+       and     r2,r5,r6
+       and     r0,r0,r7
+       add     r4,r4,r3
+       orr     r0,r0,r2                        @ Maj(a,b,c)
+       add     r8,r8,r3
+       add     r4,r4,r0
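+       @ the low byte of the K256 word just consumed doubles as the loop
+       @ counter: 0xf2 (from 0xc67178f2, the final constant) ends the rounds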
+       and     r12,r12,#0xff
+       cmp     r12,#0xf2
+       bne     .Lrounds_16_xx
+
+       ldr     r3,[sp,#16*4]           @ pull ctx
+       ldr     r0,[r3,#0]
+       ldr     r2,[r3,#4]
+       ldr     r12,[r3,#8]
+       add     r4,r4,r0
+       ldr     r0,[r3,#12]
+       add     r5,r5,r2
+       ldr     r2,[r3,#16]
+       add     r6,r6,r12
+       ldr     r12,[r3,#20]
+       add     r7,r7,r0
+       ldr     r0,[r3,#24]
+       add     r8,r8,r2
+       ldr     r2,[r3,#28]
+       add     r9,r9,r12
+       ldr     r1,[sp,#17*4]           @ pull inp
+       ldr     r12,[sp,#18*4]          @ pull inp+len
+       add     r10,r10,r0
+       add     r11,r11,r2
+       stmia   r3,{r4,r5,r6,r7,r8,r9,r10,r11}
+       cmp     r1,r12
+       sub     r14,r14,#256    @ rewind Ktbl
+       bne     .Loop
+
+       add     sp,sp,#19*4     @ destroy frame
+#if __ARM_ARCH__>=5
+       ldmia   sp!,{r4-r11,pc}
+#else
+       ldmia   sp!,{r4-r11,lr}
+       tst     lr,#1
+       moveq   pc,lr                   @ be binary compatible with V4, yet
+       .word   0xe12fff1e                      @ interoperable with Thumb ISA:-)
+#endif
+.size   sha256_block_data_order,.-sha256_block_data_order
+.asciz  "SHA256 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
+.align 2
diff --git a/deps/openssl/asm/arm-elf-gas/sha/sha512-armv4.S b/deps/openssl/asm/arm-elf-gas/sha/sha512-armv4.S
new file mode 100644 (file)
index 0000000..5730192
--- /dev/null
@@ -0,0 +1,1783 @@
+#include "arm_arch.h"
+#ifdef __ARMEL__
+# define LO 0
+# define HI 4
+# define WORD64(hi0,lo0,hi1,lo1)       .word   lo0,hi0, lo1,hi1
+#else
+# define HI 0
+# define LO 4
+# define WORD64(hi0,lo0,hi1,lo1)       .word   hi0,lo0, hi1,lo1
+#endif
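+@ WORD64 stores each 64-bit K512 constant as two 32-bit words, low word
+@ first on little-endian targets, matching the LO/HI offsets used below.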
+
+.text
+.code  32
+.type  K512,%object
+.align 5
+K512:
+WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
+WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
+WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
+WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
+WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
+WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
+WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
+WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
+WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
+WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
+WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
+WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
+WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
+WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
+WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
+WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
+WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
+WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
+WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
+WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
+WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
+WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
+WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
+WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
+WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
+WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
+WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
+WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
+WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
+WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
+WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
+WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
+WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
+WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
+WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
+WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
+WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
+WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
+WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
+WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
+.size  K512,.-K512
+.LOPENSSL_armcap:
+.word  OPENSSL_armcap_P-sha512_block_data_order
+.skip  32-4
+
+.global        sha512_block_data_order
+.type  sha512_block_data_order,%function
+sha512_block_data_order:
+       sub     r3,pc,#8                @ sha512_block_data_order
+       add     r2,r1,r2,lsl#7  @ len to point at the end of inp
+#if __ARM_ARCH__>=7
+       ldr     r12,.LOPENSSL_armcap
+       ldr     r12,[r3,r12]            @ OPENSSL_armcap_P
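+       @ bit 0 of OPENSSL_armcap_P selects the NEON code path below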
+       tst     r12,#1
+       bne     .LNEON
+#endif
+       stmdb   sp!,{r4-r12,lr}
+       sub     r14,r3,#672             @ K512
+       sub     sp,sp,#9*8
+
+       ldr     r7,[r0,#32+LO]
+       ldr     r8,[r0,#32+HI]
+       ldr     r9, [r0,#48+LO]
+       ldr     r10, [r0,#48+HI]
+       ldr     r11, [r0,#56+LO]
+       ldr     r12, [r0,#56+HI]
+.Loop:
+       str     r9, [sp,#48+0]
+       str     r10, [sp,#48+4]
+       str     r11, [sp,#56+0]
+       str     r12, [sp,#56+4]
+       ldr     r5,[r0,#0+LO]
+       ldr     r6,[r0,#0+HI]
+       ldr     r3,[r0,#8+LO]
+       ldr     r4,[r0,#8+HI]
+       ldr     r9, [r0,#16+LO]
+       ldr     r10, [r0,#16+HI]
+       ldr     r11, [r0,#24+LO]
+       ldr     r12, [r0,#24+HI]
+       str     r3,[sp,#8+0]
+       str     r4,[sp,#8+4]
+       str     r9, [sp,#16+0]
+       str     r10, [sp,#16+4]
+       str     r11, [sp,#24+0]
+       str     r12, [sp,#24+4]
+       ldr     r3,[r0,#40+LO]
+       ldr     r4,[r0,#40+HI]
+       str     r3,[sp,#40+0]
+       str     r4,[sp,#40+4]
+
+.L00_15:
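+       @ load the next big-endian 64-bit input word: byte by byte on
+       @ pre-ARMv7 (tolerates unaligned input), ldr plus rev on
+       @ little-endian ARMv7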
+#if __ARM_ARCH__<7
+       ldrb    r3,[r1,#7]
+       ldrb    r9, [r1,#6]
+       ldrb    r10, [r1,#5]
+       ldrb    r11, [r1,#4]
+       ldrb    r4,[r1,#3]
+       ldrb    r12, [r1,#2]
+       orr     r3,r3,r9,lsl#8
+       ldrb    r9, [r1,#1]
+       orr     r3,r3,r10,lsl#16
+       ldrb    r10, [r1],#8
+       orr     r3,r3,r11,lsl#24
+       orr     r4,r4,r12,lsl#8
+       orr     r4,r4,r9,lsl#16
+       orr     r4,r4,r10,lsl#24
+#else
+       ldr     r3,[r1,#4]
+       ldr     r4,[r1],#8
+#ifdef __ARMEL__
+       rev     r3,r3
+       rev     r4,r4
+#endif
+#endif
+       @ Sigma1(x)     (ROTR((x),14) ^ ROTR((x),18)  ^ ROTR((x),41))
+       @ LO            lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
+       @ HI            hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
+       mov     r9,r7,lsr#14
+       str     r3,[sp,#64+0]
+       mov     r10,r8,lsr#14
+       str     r4,[sp,#64+4]
+       eor     r9,r9,r8,lsl#18
+       ldr     r11,[sp,#56+0]  @ h.lo
+       eor     r10,r10,r7,lsl#18
+       ldr     r12,[sp,#56+4]  @ h.hi
+       eor     r9,r9,r7,lsr#18
+       eor     r10,r10,r8,lsr#18
+       eor     r9,r9,r8,lsl#14
+       eor     r10,r10,r7,lsl#14
+       eor     r9,r9,r8,lsr#9
+       eor     r10,r10,r7,lsr#9
+       eor     r9,r9,r7,lsl#23
+       eor     r10,r10,r8,lsl#23       @ Sigma1(e)
+       adds    r3,r3,r9
+       ldr     r9,[sp,#40+0]   @ f.lo
+       adc     r4,r4,r10               @ T += Sigma1(e)
+       ldr     r10,[sp,#40+4]  @ f.hi
+       adds    r3,r3,r11
+       ldr     r11,[sp,#48+0]  @ g.lo
+       adc     r4,r4,r12               @ T += h
+       ldr     r12,[sp,#48+4]  @ g.hi
+
+       eor     r9,r9,r11
+       str     r7,[sp,#32+0]
+       eor     r10,r10,r12
+       str     r8,[sp,#32+4]
+       and     r9,r9,r7
+       str     r5,[sp,#0+0]
+       and     r10,r10,r8
+       str     r6,[sp,#0+4]
+       eor     r9,r9,r11
+       ldr     r11,[r14,#LO]   @ K[i].lo
+       eor     r10,r10,r12             @ Ch(e,f,g)
+       ldr     r12,[r14,#HI]   @ K[i].hi
+
+       adds    r3,r3,r9
+       ldr     r7,[sp,#24+0]   @ d.lo
+       adc     r4,r4,r10               @ T += Ch(e,f,g)
+       ldr     r8,[sp,#24+4]   @ d.hi
+       adds    r3,r3,r11
+       and     r9,r11,#0xff
+       adc     r4,r4,r12               @ T += K[i]
+       adds    r7,r7,r3
+       ldr     r11,[sp,#8+0]   @ b.lo
+       adc     r8,r8,r4                @ d += T
+       teq     r9,#148
+
+       ldr     r12,[sp,#16+0]  @ c.lo
+       orreq   r14,r14,#1
+       @ Sigma0(x)     (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
+       @ LO            lo>>28^hi<<4  ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
+       @ HI            hi>>28^lo<<4  ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
+       mov     r9,r5,lsr#28
+       mov     r10,r6,lsr#28
+       eor     r9,r9,r6,lsl#4
+       eor     r10,r10,r5,lsl#4
+       eor     r9,r9,r6,lsr#2
+       eor     r10,r10,r5,lsr#2
+       eor     r9,r9,r5,lsl#30
+       eor     r10,r10,r6,lsl#30
+       eor     r9,r9,r6,lsr#7
+       eor     r10,r10,r5,lsr#7
+       eor     r9,r9,r5,lsl#25
+       eor     r10,r10,r6,lsl#25       @ Sigma0(a)
+       adds    r3,r3,r9
+       and     r9,r5,r11
+       adc     r4,r4,r10               @ T += Sigma0(a)
+
+       ldr     r10,[sp,#8+4]   @ b.hi
+       orr     r5,r5,r11
+       ldr     r11,[sp,#16+4]  @ c.hi
+       and     r5,r5,r12
+       and     r12,r6,r10
+       orr     r6,r6,r10
+       orr     r5,r5,r9                @ Maj(a,b,c).lo
+       and     r6,r6,r11
+       adds    r5,r5,r3
+       orr     r6,r6,r12               @ Maj(a,b,c).hi
+       sub     sp,sp,#8
+       adc     r6,r6,r4                @ h += T
+       tst     r14,#1
+       add     r14,r14,#8
+       tst     r14,#1
+       beq     .L00_15
+       ldr     r9,[sp,#184+0]
+       ldr     r10,[sp,#184+4]
+       bic     r14,r14,#1
+.L16_79:
+       @ sigma0(x)     (ROTR((x),1)  ^ ROTR((x),8)  ^ ((x)>>7))
+       @ LO            lo>>1^hi<<31  ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
+       @ HI            hi>>1^lo<<31  ^ hi>>8^lo<<24 ^ hi>>7
+       mov     r3,r9,lsr#1
+       ldr     r11,[sp,#80+0]
+       mov     r4,r10,lsr#1
+       ldr     r12,[sp,#80+4]
+       eor     r3,r3,r10,lsl#31
+       eor     r4,r4,r9,lsl#31
+       eor     r3,r3,r9,lsr#8
+       eor     r4,r4,r10,lsr#8
+       eor     r3,r3,r10,lsl#24
+       eor     r4,r4,r9,lsl#24
+       eor     r3,r3,r9,lsr#7
+       eor     r4,r4,r10,lsr#7
+       eor     r3,r3,r10,lsl#25
+
+       @ sigma1(x)     (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
+       @ LO            lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
+       @ HI            hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
+       mov     r9,r11,lsr#19
+       mov     r10,r12,lsr#19
+       eor     r9,r9,r12,lsl#13
+       eor     r10,r10,r11,lsl#13
+       eor     r9,r9,r12,lsr#29
+       eor     r10,r10,r11,lsr#29
+       eor     r9,r9,r11,lsl#3
+       eor     r10,r10,r12,lsl#3
+       eor     r9,r9,r11,lsr#6
+       eor     r10,r10,r12,lsr#6
+       ldr     r11,[sp,#120+0]
+       eor     r9,r9,r12,lsl#26
+
+       ldr     r12,[sp,#120+4]
+       adds    r3,r3,r9
+       ldr     r9,[sp,#192+0]
+       adc     r4,r4,r10
+
+       ldr     r10,[sp,#192+4]
+       adds    r3,r3,r11
+       adc     r4,r4,r12
+       adds    r3,r3,r9
+       adc     r4,r4,r10
+       @ Sigma1(x)     (ROTR((x),14) ^ ROTR((x),18)  ^ ROTR((x),41))
+       @ LO            lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
+       @ HI            hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
+       mov     r9,r7,lsr#14
+       str     r3,[sp,#64+0]
+       mov     r10,r8,lsr#14
+       str     r4,[sp,#64+4]
+       eor     r9,r9,r8,lsl#18
+       ldr     r11,[sp,#56+0]  @ h.lo
+       eor     r10,r10,r7,lsl#18
+       ldr     r12,[sp,#56+4]  @ h.hi
+       eor     r9,r9,r7,lsr#18
+       eor     r10,r10,r8,lsr#18
+       eor     r9,r9,r8,lsl#14
+       eor     r10,r10,r7,lsl#14
+       eor     r9,r9,r8,lsr#9
+       eor     r10,r10,r7,lsr#9
+       eor     r9,r9,r7,lsl#23
+       eor     r10,r10,r8,lsl#23       @ Sigma1(e)
+       adds    r3,r3,r9
+       ldr     r9,[sp,#40+0]   @ f.lo
+       adc     r4,r4,r10               @ T += Sigma1(e)
+       ldr     r10,[sp,#40+4]  @ f.hi
+       adds    r3,r3,r11
+       ldr     r11,[sp,#48+0]  @ g.lo
+       adc     r4,r4,r12               @ T += h
+       ldr     r12,[sp,#48+4]  @ g.hi
+
+       eor     r9,r9,r11
+       str     r7,[sp,#32+0]
+       eor     r10,r10,r12
+       str     r8,[sp,#32+4]
+       and     r9,r9,r7
+       str     r5,[sp,#0+0]
+       and     r10,r10,r8
+       str     r6,[sp,#0+4]
+       eor     r9,r9,r11
+       ldr     r11,[r14,#LO]   @ K[i].lo
+       eor     r10,r10,r12             @ Ch(e,f,g)
+       ldr     r12,[r14,#HI]   @ K[i].hi
+
+       adds    r3,r3,r9
+       ldr     r7,[sp,#24+0]   @ d.lo
+       adc     r4,r4,r10               @ T += Ch(e,f,g)
+       ldr     r8,[sp,#24+4]   @ d.hi
+       adds    r3,r3,r11
+       and     r9,r11,#0xff
+       adc     r4,r4,r12               @ T += K[i]
+       adds    r7,r7,r3
+       ldr     r11,[sp,#8+0]   @ b.lo
+       adc     r8,r8,r4                @ d += T
+       teq     r9,#23
+
+       ldr     r12,[sp,#16+0]  @ c.lo
+       orreq   r14,r14,#1
+       @ Sigma0(x)     (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
+       @ LO            lo>>28^hi<<4  ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
+       @ HI            hi>>28^lo<<4  ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
+       mov     r9,r5,lsr#28
+       mov     r10,r6,lsr#28
+       eor     r9,r9,r6,lsl#4
+       eor     r10,r10,r5,lsl#4
+       eor     r9,r9,r6,lsr#2
+       eor     r10,r10,r5,lsr#2
+       eor     r9,r9,r5,lsl#30
+       eor     r10,r10,r6,lsl#30
+       eor     r9,r9,r6,lsr#7
+       eor     r10,r10,r5,lsr#7
+       eor     r9,r9,r5,lsl#25
+       eor     r10,r10,r6,lsl#25       @ Sigma0(a)
+       adds    r3,r3,r9
+       and     r9,r5,r11
+       adc     r4,r4,r10               @ T += Sigma0(a)
+
+       ldr     r10,[sp,#8+4]   @ b.hi
+       orr     r5,r5,r11
+       ldr     r11,[sp,#16+4]  @ c.hi
+       and     r5,r5,r12
+       and     r12,r6,r10
+       orr     r6,r6,r10
+       orr     r5,r5,r9                @ Maj(a,b,c).lo
+       and     r6,r6,r11
+       adds    r5,r5,r3
+       orr     r6,r6,r12               @ Maj(a,b,c).hi
+       sub     sp,sp,#8
+       adc     r6,r6,r4                @ h += T
+       tst     r14,#1
+       add     r14,r14,#8
+       ldreq   r9,[sp,#184+0]
+       ldreq   r10,[sp,#184+4]
+       beq     .L16_79
+       bic     r14,r14,#1
+
+       ldr     r3,[sp,#8+0]
+       ldr     r4,[sp,#8+4]
+       ldr     r9, [r0,#0+LO]
+       ldr     r10, [r0,#0+HI]
+       ldr     r11, [r0,#8+LO]
+       ldr     r12, [r0,#8+HI]
+       adds    r9,r5,r9
+       str     r9, [r0,#0+LO]
+       adc     r10,r6,r10
+       str     r10, [r0,#0+HI]
+       adds    r11,r3,r11
+       str     r11, [r0,#8+LO]
+       adc     r12,r4,r12
+       str     r12, [r0,#8+HI]
+
+       ldr     r5,[sp,#16+0]
+       ldr     r6,[sp,#16+4]
+       ldr     r3,[sp,#24+0]
+       ldr     r4,[sp,#24+4]
+       ldr     r9, [r0,#16+LO]
+       ldr     r10, [r0,#16+HI]
+       ldr     r11, [r0,#24+LO]
+       ldr     r12, [r0,#24+HI]
+       adds    r9,r5,r9
+       str     r9, [r0,#16+LO]
+       adc     r10,r6,r10
+       str     r10, [r0,#16+HI]
+       adds    r11,r3,r11
+       str     r11, [r0,#24+LO]
+       adc     r12,r4,r12
+       str     r12, [r0,#24+HI]
+
+       ldr     r3,[sp,#40+0]
+       ldr     r4,[sp,#40+4]
+       ldr     r9, [r0,#32+LO]
+       ldr     r10, [r0,#32+HI]
+       ldr     r11, [r0,#40+LO]
+       ldr     r12, [r0,#40+HI]
+       adds    r7,r7,r9
+       str     r7,[r0,#32+LO]
+       adc     r8,r8,r10
+       str     r8,[r0,#32+HI]
+       adds    r11,r3,r11
+       str     r11, [r0,#40+LO]
+       adc     r12,r4,r12
+       str     r12, [r0,#40+HI]
+
+       ldr     r5,[sp,#48+0]
+       ldr     r6,[sp,#48+4]
+       ldr     r3,[sp,#56+0]
+       ldr     r4,[sp,#56+4]
+       ldr     r9, [r0,#48+LO]
+       ldr     r10, [r0,#48+HI]
+       ldr     r11, [r0,#56+LO]
+       ldr     r12, [r0,#56+HI]
+       adds    r9,r5,r9
+       str     r9, [r0,#48+LO]
+       adc     r10,r6,r10
+       str     r10, [r0,#48+HI]
+       adds    r11,r3,r11
+       str     r11, [r0,#56+LO]
+       adc     r12,r4,r12
+       str     r12, [r0,#56+HI]
+
+       add     sp,sp,#640
+       sub     r14,r14,#640
+
+       teq     r1,r2
+       bne     .Loop
+
+       add     sp,sp,#8*9              @ destroy frame
+#if __ARM_ARCH__>=5
+       ldmia   sp!,{r4-r12,pc}
+#else
+       ldmia   sp!,{r4-r12,lr}
+       tst     lr,#1
+       moveq   pc,lr                   @ be binary compatible with V4, yet
+       .word   0xe12fff1e                      @ interoperable with Thumb ISA:-)
+#endif
+#if __ARM_ARCH__>=7
+.fpu   neon
+
+.align 4
+.LNEON:
+       dmb                             @ errata #451034 on early Cortex A8
+       vstmdb  sp!,{d8-d15}            @ ABI specification says so
+       sub     r3,r3,#672              @ K512
+       vldmia  r0,{d16-d23}            @ load context
+.Loop_neon:
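+       @ the eight 64-bit state words live in d16-d23 (roles rotate each
+       @ round), the message schedule X[0..15] in d0-d15 (q0-q7), and
+       @ temporaries in d24-d30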
+       vshr.u64        d24,d20,#14     @ 0
+#if 0<16
+       vld1.64         {d0},[r1]!      @ handles unaligned
+#endif
+       vshr.u64        d25,d20,#18
+       vshr.u64        d26,d20,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d20,#50
+       vsli.64         d25,d20,#46
+       vsli.64         d26,d20,#23
+#if 0<16 && defined(__ARMEL__)
+       vrev64.8        d0,d0
+#endif
+       vadd.i64        d27,d28,d23
+       veor            d29,d21,d22
+       veor            d24,d25
+       vand            d29,d20
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d22                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d16,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d16,#34
+       vshr.u64        d26,d16,#39
+       vsli.64         d24,d16,#36
+       vsli.64         d25,d16,#30
+       vsli.64         d26,d16,#25
+       vadd.i64        d27,d0
+       vorr            d30,d16,d18
+       vand            d29,d16,d18
+       veor            d23,d24,d25
+       vand            d30,d17
+       veor            d23,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d23,d27
+       vadd.i64        d19,d27
+       vadd.i64        d23,d30
+       vshr.u64        d24,d19,#14     @ 1
+#if 1<16
+       vld1.64         {d1},[r1]!      @ handles unaligned
+#endif
+       vshr.u64        d25,d19,#18
+       vshr.u64        d26,d19,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d19,#50
+       vsli.64         d25,d19,#46
+       vsli.64         d26,d19,#23
+#if 1<16 && defined(__ARMEL__)
+       vrev64.8        d1,d1
+#endif
+       vadd.i64        d27,d28,d22
+       veor            d29,d20,d21
+       veor            d24,d25
+       vand            d29,d19
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d21                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d23,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d23,#34
+       vshr.u64        d26,d23,#39
+       vsli.64         d24,d23,#36
+       vsli.64         d25,d23,#30
+       vsli.64         d26,d23,#25
+       vadd.i64        d27,d1
+       vorr            d30,d23,d17
+       vand            d29,d23,d17
+       veor            d22,d24,d25
+       vand            d30,d16
+       veor            d22,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d22,d27
+       vadd.i64        d18,d27
+       vadd.i64        d22,d30
+       vshr.u64        d24,d18,#14     @ 2
+#if 2<16
+       vld1.64         {d2},[r1]!      @ handles unaligned
+#endif
+       vshr.u64        d25,d18,#18
+       vshr.u64        d26,d18,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d18,#50
+       vsli.64         d25,d18,#46
+       vsli.64         d26,d18,#23
+#if 2<16 && defined(__ARMEL__)
+       vrev64.8        d2,d2
+#endif
+       vadd.i64        d27,d28,d21
+       veor            d29,d19,d20
+       veor            d24,d25
+       vand            d29,d18
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d20                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d22,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d22,#34
+       vshr.u64        d26,d22,#39
+       vsli.64         d24,d22,#36
+       vsli.64         d25,d22,#30
+       vsli.64         d26,d22,#25
+       vadd.i64        d27,d2
+       vorr            d30,d22,d16
+       vand            d29,d22,d16
+       veor            d21,d24,d25
+       vand            d30,d23
+       veor            d21,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d21,d27
+       vadd.i64        d17,d27
+       vadd.i64        d21,d30
+       vshr.u64        d24,d17,#14     @ 3
+#if 3<16
+       vld1.64         {d3},[r1]!      @ handles unaligned
+#endif
+       vshr.u64        d25,d17,#18
+       vshr.u64        d26,d17,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d17,#50
+       vsli.64         d25,d17,#46
+       vsli.64         d26,d17,#23
+#if 3<16 && defined(__ARMEL__)
+       vrev64.8        d3,d3
+#endif
+       vadd.i64        d27,d28,d20
+       veor            d29,d18,d19
+       veor            d24,d25
+       vand            d29,d17
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d19                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d21,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d21,#34
+       vshr.u64        d26,d21,#39
+       vsli.64         d24,d21,#36
+       vsli.64         d25,d21,#30
+       vsli.64         d26,d21,#25
+       vadd.i64        d27,d3
+       vorr            d30,d21,d23
+       vand            d29,d21,d23
+       veor            d20,d24,d25
+       vand            d30,d22
+       veor            d20,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d20,d27
+       vadd.i64        d16,d27
+       vadd.i64        d20,d30
+       vshr.u64        d24,d16,#14     @ 4
+#if 4<16
+       vld1.64         {d4},[r1]!      @ handles unaligned
+#endif
+       vshr.u64        d25,d16,#18
+       vshr.u64        d26,d16,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d16,#50
+       vsli.64         d25,d16,#46
+       vsli.64         d26,d16,#23
+#if 4<16 && defined(__ARMEL__)
+       vrev64.8        d4,d4
+#endif
+       vadd.i64        d27,d28,d19
+       veor            d29,d17,d18
+       veor            d24,d25
+       vand            d29,d16
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d18                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d20,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d20,#34
+       vshr.u64        d26,d20,#39
+       vsli.64         d24,d20,#36
+       vsli.64         d25,d20,#30
+       vsli.64         d26,d20,#25
+       vadd.i64        d27,d4
+       vorr            d30,d20,d22
+       vand            d29,d20,d22
+       veor            d19,d24,d25
+       vand            d30,d21
+       veor            d19,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d19,d27
+       vadd.i64        d23,d27
+       vadd.i64        d19,d30
+       vshr.u64        d24,d23,#14     @ 5
+#if 5<16
+       vld1.64         {d5},[r1]!      @ handles unaligned
+#endif
+       vshr.u64        d25,d23,#18
+       vshr.u64        d26,d23,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d23,#50
+       vsli.64         d25,d23,#46
+       vsli.64         d26,d23,#23
+#if 5<16 && defined(__ARMEL__)
+       vrev64.8        d5,d5
+#endif
+       vadd.i64        d27,d28,d18
+       veor            d29,d16,d17
+       veor            d24,d25
+       vand            d29,d23
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d17                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d19,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d19,#34
+       vshr.u64        d26,d19,#39
+       vsli.64         d24,d19,#36
+       vsli.64         d25,d19,#30
+       vsli.64         d26,d19,#25
+       vadd.i64        d27,d5
+       vorr            d30,d19,d21
+       vand            d29,d19,d21
+       veor            d18,d24,d25
+       vand            d30,d20
+       veor            d18,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d18,d27
+       vadd.i64        d22,d27
+       vadd.i64        d18,d30
+       vshr.u64        d24,d22,#14     @ 6
+#if 6<16
+       vld1.64         {d6},[r1]!      @ handles unaligned
+#endif
+       vshr.u64        d25,d22,#18
+       vshr.u64        d26,d22,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d22,#50
+       vsli.64         d25,d22,#46
+       vsli.64         d26,d22,#23
+#if 6<16 && defined(__ARMEL__)
+       vrev64.8        d6,d6
+#endif
+       vadd.i64        d27,d28,d17
+       veor            d29,d23,d16
+       veor            d24,d25
+       vand            d29,d22
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d16                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d18,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d18,#34
+       vshr.u64        d26,d18,#39
+       vsli.64         d24,d18,#36
+       vsli.64         d25,d18,#30
+       vsli.64         d26,d18,#25
+       vadd.i64        d27,d6
+       vorr            d30,d18,d20
+       vand            d29,d18,d20
+       veor            d17,d24,d25
+       vand            d30,d19
+       veor            d17,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d17,d27
+       vadd.i64        d21,d27
+       vadd.i64        d17,d30
+       vshr.u64        d24,d21,#14     @ 7
+#if 7<16
+       vld1.64         {d7},[r1]!      @ handles unaligned
+#endif
+       vshr.u64        d25,d21,#18
+       vshr.u64        d26,d21,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d21,#50
+       vsli.64         d25,d21,#46
+       vsli.64         d26,d21,#23
+#if 7<16 && defined(__ARMEL__)
+       vrev64.8        d7,d7
+#endif
+       vadd.i64        d27,d28,d16
+       veor            d29,d22,d23
+       veor            d24,d25
+       vand            d29,d21
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d23                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d17,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d17,#34
+       vshr.u64        d26,d17,#39
+       vsli.64         d24,d17,#36
+       vsli.64         d25,d17,#30
+       vsli.64         d26,d17,#25
+       vadd.i64        d27,d7
+       vorr            d30,d17,d19
+       vand            d29,d17,d19
+       veor            d16,d24,d25
+       vand            d30,d18
+       veor            d16,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d16,d27
+       vadd.i64        d20,d27
+       vadd.i64        d16,d30
+       vshr.u64        d24,d20,#14     @ 8
+#if 8<16
+       vld1.64         {d8},[r1]!      @ handles unaligned
+#endif
+       vshr.u64        d25,d20,#18
+       vshr.u64        d26,d20,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d20,#50
+       vsli.64         d25,d20,#46
+       vsli.64         d26,d20,#23
+#if 8<16 && defined(__ARMEL__)
+       vrev64.8        d8,d8
+#endif
+       vadd.i64        d27,d28,d23
+       veor            d29,d21,d22
+       veor            d24,d25
+       vand            d29,d20
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d22                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d16,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d16,#34
+       vshr.u64        d26,d16,#39
+       vsli.64         d24,d16,#36
+       vsli.64         d25,d16,#30
+       vsli.64         d26,d16,#25
+       vadd.i64        d27,d8
+       vorr            d30,d16,d18
+       vand            d29,d16,d18
+       veor            d23,d24,d25
+       vand            d30,d17
+       veor            d23,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d23,d27
+       vadd.i64        d19,d27
+       vadd.i64        d23,d30
+       vshr.u64        d24,d19,#14     @ 9
+#if 9<16
+       vld1.64         {d9},[r1]!      @ handles unaligned
+#endif
+       vshr.u64        d25,d19,#18
+       vshr.u64        d26,d19,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d19,#50
+       vsli.64         d25,d19,#46
+       vsli.64         d26,d19,#23
+#if 9<16 && defined(__ARMEL__)
+       vrev64.8        d9,d9
+#endif
+       vadd.i64        d27,d28,d22
+       veor            d29,d20,d21
+       veor            d24,d25
+       vand            d29,d19
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d21                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d23,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d23,#34
+       vshr.u64        d26,d23,#39
+       vsli.64         d24,d23,#36
+       vsli.64         d25,d23,#30
+       vsli.64         d26,d23,#25
+       vadd.i64        d27,d9
+       vorr            d30,d23,d17
+       vand            d29,d23,d17
+       veor            d22,d24,d25
+       vand            d30,d16
+       veor            d22,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d22,d27
+       vadd.i64        d18,d27
+       vadd.i64        d22,d30
+       vshr.u64        d24,d18,#14     @ 10
+#if 10<16
+       vld1.64         {d10},[r1]!     @ handles unaligned
+#endif
+       vshr.u64        d25,d18,#18
+       vshr.u64        d26,d18,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d18,#50
+       vsli.64         d25,d18,#46
+       vsli.64         d26,d18,#23
+#if 10<16 && defined(__ARMEL__)
+       vrev64.8        d10,d10
+#endif
+       vadd.i64        d27,d28,d21
+       veor            d29,d19,d20
+       veor            d24,d25
+       vand            d29,d18
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d20                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d22,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d22,#34
+       vshr.u64        d26,d22,#39
+       vsli.64         d24,d22,#36
+       vsli.64         d25,d22,#30
+       vsli.64         d26,d22,#25
+       vadd.i64        d27,d10
+       vorr            d30,d22,d16
+       vand            d29,d22,d16
+       veor            d21,d24,d25
+       vand            d30,d23
+       veor            d21,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d21,d27
+       vadd.i64        d17,d27
+       vadd.i64        d21,d30
+       vshr.u64        d24,d17,#14     @ 11
+#if 11<16
+       vld1.64         {d11},[r1]!     @ handles unaligned
+#endif
+       vshr.u64        d25,d17,#18
+       vshr.u64        d26,d17,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d17,#50
+       vsli.64         d25,d17,#46
+       vsli.64         d26,d17,#23
+#if 11<16 && defined(__ARMEL__)
+       vrev64.8        d11,d11
+#endif
+       vadd.i64        d27,d28,d20
+       veor            d29,d18,d19
+       veor            d24,d25
+       vand            d29,d17
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d19                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d21,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d21,#34
+       vshr.u64        d26,d21,#39
+       vsli.64         d24,d21,#36
+       vsli.64         d25,d21,#30
+       vsli.64         d26,d21,#25
+       vadd.i64        d27,d11
+       vorr            d30,d21,d23
+       vand            d29,d21,d23
+       veor            d20,d24,d25
+       vand            d30,d22
+       veor            d20,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d20,d27
+       vadd.i64        d16,d27
+       vadd.i64        d20,d30
+       vshr.u64        d24,d16,#14     @ 12
+#if 12<16
+       vld1.64         {d12},[r1]!     @ handles unaligned
+#endif
+       vshr.u64        d25,d16,#18
+       vshr.u64        d26,d16,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d16,#50
+       vsli.64         d25,d16,#46
+       vsli.64         d26,d16,#23
+#if 12<16 && defined(__ARMEL__)
+       vrev64.8        d12,d12
+#endif
+       vadd.i64        d27,d28,d19
+       veor            d29,d17,d18
+       veor            d24,d25
+       vand            d29,d16
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d18                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d20,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d20,#34
+       vshr.u64        d26,d20,#39
+       vsli.64         d24,d20,#36
+       vsli.64         d25,d20,#30
+       vsli.64         d26,d20,#25
+       vadd.i64        d27,d12
+       vorr            d30,d20,d22
+       vand            d29,d20,d22
+       veor            d19,d24,d25
+       vand            d30,d21
+       veor            d19,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d19,d27
+       vadd.i64        d23,d27
+       vadd.i64        d19,d30
+       vshr.u64        d24,d23,#14     @ 13
+#if 13<16
+       vld1.64         {d13},[r1]!     @ handles unaligned
+#endif
+       vshr.u64        d25,d23,#18
+       vshr.u64        d26,d23,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d23,#50
+       vsli.64         d25,d23,#46
+       vsli.64         d26,d23,#23
+#if 13<16 && defined(__ARMEL__)
+       vrev64.8        d13,d13
+#endif
+       vadd.i64        d27,d28,d18
+       veor            d29,d16,d17
+       veor            d24,d25
+       vand            d29,d23
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d17                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d19,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d19,#34
+       vshr.u64        d26,d19,#39
+       vsli.64         d24,d19,#36
+       vsli.64         d25,d19,#30
+       vsli.64         d26,d19,#25
+       vadd.i64        d27,d13
+       vorr            d30,d19,d21
+       vand            d29,d19,d21
+       veor            d18,d24,d25
+       vand            d30,d20
+       veor            d18,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d18,d27
+       vadd.i64        d22,d27
+       vadd.i64        d18,d30
+       vshr.u64        d24,d22,#14     @ 14
+#if 14<16
+       vld1.64         {d14},[r1]!     @ handles unaligned
+#endif
+       vshr.u64        d25,d22,#18
+       vshr.u64        d26,d22,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d22,#50
+       vsli.64         d25,d22,#46
+       vsli.64         d26,d22,#23
+#if 14<16 && defined(__ARMEL__)
+       vrev64.8        d14,d14
+#endif
+       vadd.i64        d27,d28,d17
+       veor            d29,d23,d16
+       veor            d24,d25
+       vand            d29,d22
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d16                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d18,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d18,#34
+       vshr.u64        d26,d18,#39
+       vsli.64         d24,d18,#36
+       vsli.64         d25,d18,#30
+       vsli.64         d26,d18,#25
+       vadd.i64        d27,d14
+       vorr            d30,d18,d20
+       vand            d29,d18,d20
+       veor            d17,d24,d25
+       vand            d30,d19
+       veor            d17,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d17,d27
+       vadd.i64        d21,d27
+       vadd.i64        d17,d30
+       vshr.u64        d24,d21,#14     @ 15
+#if 15<16
+       vld1.64         {d15},[r1]!     @ handles unaligned
+#endif
+       vshr.u64        d25,d21,#18
+       vshr.u64        d26,d21,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d21,#50
+       vsli.64         d25,d21,#46
+       vsli.64         d26,d21,#23
+#if 15<16 && defined(__ARMEL__)
+       vrev64.8        d15,d15
+#endif
+       vadd.i64        d27,d28,d16
+       veor            d29,d22,d23
+       veor            d24,d25
+       vand            d29,d21
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d23                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d17,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d17,#34
+       vshr.u64        d26,d17,#39
+       vsli.64         d24,d17,#36
+       vsli.64         d25,d17,#30
+       vsli.64         d26,d17,#25
+       vadd.i64        d27,d15
+       vorr            d30,d17,d19
+       vand            d29,d17,d19
+       veor            d16,d24,d25
+       vand            d30,d18
+       veor            d16,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d16,d27
+       vadd.i64        d20,d27
+       vadd.i64        d16,d30
+       mov             r12,#4
+.L16_79_neon:
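+       @ four passes of 16 rounds; the schedule is extended two rounds at a
+       @ time in q0-q7 using sigma0/sigma1, interleaved with the round function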
+       subs            r12,#1
+       vshr.u64        q12,q7,#19
+       vshr.u64        q13,q7,#61
+       vshr.u64        q15,q7,#6
+       vsli.64         q12,q7,#45
+       vext.8          q14,q0,q1,#8    @ X[i+1]
+       vsli.64         q13,q7,#3
+       veor            q15,q12
+       vshr.u64        q12,q14,#1
+       veor            q15,q13                         @ sigma1(X[i+14])
+       vshr.u64        q13,q14,#8
+       vadd.i64        q0,q15
+       vshr.u64        q15,q14,#7
+       vsli.64         q12,q14,#63
+       vsli.64         q13,q14,#56
+       vext.8          q14,q4,q5,#8    @ X[i+9]
+       veor            q15,q12
+       vshr.u64        d24,d20,#14             @ from NEON_00_15
+       vadd.i64        q0,q14
+       vshr.u64        d25,d20,#18             @ from NEON_00_15
+       veor            q15,q13                         @ sigma0(X[i+1])
+       vshr.u64        d26,d20,#41             @ from NEON_00_15
+       vadd.i64        q0,q15
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d20,#50
+       vsli.64         d25,d20,#46
+       vsli.64         d26,d20,#23
+#if 16<16 && defined(__ARMEL__)
+       vrev64.8        ,
+#endif
+       vadd.i64        d27,d28,d23
+       veor            d29,d21,d22
+       veor            d24,d25
+       vand            d29,d20
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d22                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d16,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d16,#34
+       vshr.u64        d26,d16,#39
+       vsli.64         d24,d16,#36
+       vsli.64         d25,d16,#30
+       vsli.64         d26,d16,#25
+       vadd.i64        d27,d0
+       vorr            d30,d16,d18
+       vand            d29,d16,d18
+       veor            d23,d24,d25
+       vand            d30,d17
+       veor            d23,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d23,d27
+       vadd.i64        d19,d27
+       vadd.i64        d23,d30
+       vshr.u64        d24,d19,#14     @ 17
+#if 17<16
+       vld1.64         {d1},[r1]!      @ handles unaligned
+#endif
+       vshr.u64        d25,d19,#18
+       vshr.u64        d26,d19,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d19,#50
+       vsli.64         d25,d19,#46
+       vsli.64         d26,d19,#23
+#if 17<16 && defined(__ARMEL__)
+       vrev64.8        ,
+#endif
+       vadd.i64        d27,d28,d22
+       veor            d29,d20,d21
+       veor            d24,d25
+       vand            d29,d19
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d21                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d23,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d23,#34
+       vshr.u64        d26,d23,#39
+       vsli.64         d24,d23,#36
+       vsli.64         d25,d23,#30
+       vsli.64         d26,d23,#25
+       vadd.i64        d27,d1
+       vorr            d30,d23,d17
+       vand            d29,d23,d17
+       veor            d22,d24,d25
+       vand            d30,d16
+       veor            d22,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d22,d27
+       vadd.i64        d18,d27
+       vadd.i64        d22,d30
+       vshr.u64        q12,q0,#19
+       vshr.u64        q13,q0,#61
+       vshr.u64        q15,q0,#6
+       vsli.64         q12,q0,#45
+       vext.8          q14,q1,q2,#8    @ X[i+1]
+       vsli.64         q13,q0,#3
+       veor            q15,q12
+       vshr.u64        q12,q14,#1
+       veor            q15,q13                         @ sigma1(X[i+14])
+       vshr.u64        q13,q14,#8
+       vadd.i64        q1,q15
+       vshr.u64        q15,q14,#7
+       vsli.64         q12,q14,#63
+       vsli.64         q13,q14,#56
+       vext.8          q14,q5,q6,#8    @ X[i+9]
+       veor            q15,q12
+       vshr.u64        d24,d18,#14             @ from NEON_00_15
+       vadd.i64        q1,q14
+       vshr.u64        d25,d18,#18             @ from NEON_00_15
+       veor            q15,q13                         @ sigma0(X[i+1])
+       vshr.u64        d26,d18,#41             @ from NEON_00_15
+       vadd.i64        q1,q15
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d18,#50
+       vsli.64         d25,d18,#46
+       vsli.64         d26,d18,#23
+#if 18<16 && defined(__ARMEL__)
+       vrev64.8        ,
+#endif
+       vadd.i64        d27,d28,d21
+       veor            d29,d19,d20
+       veor            d24,d25
+       vand            d29,d18
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d20                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d22,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d22,#34
+       vshr.u64        d26,d22,#39
+       vsli.64         d24,d22,#36
+       vsli.64         d25,d22,#30
+       vsli.64         d26,d22,#25
+       vadd.i64        d27,d2
+       vorr            d30,d22,d16
+       vand            d29,d22,d16
+       veor            d21,d24,d25
+       vand            d30,d23
+       veor            d21,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d21,d27
+       vadd.i64        d17,d27
+       vadd.i64        d21,d30
+       vshr.u64        d24,d17,#14     @ 19
+#if 19<16
+       vld1.64         {d3},[r1]!      @ handles unaligned
+#endif
+       vshr.u64        d25,d17,#18
+       vshr.u64        d26,d17,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d17,#50
+       vsli.64         d25,d17,#46
+       vsli.64         d26,d17,#23
+#if 19<16 && defined(__ARMEL__)
+       vrev64.8        ,
+#endif
+       vadd.i64        d27,d28,d20
+       veor            d29,d18,d19
+       veor            d24,d25
+       vand            d29,d17
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d19                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d21,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d21,#34
+       vshr.u64        d26,d21,#39
+       vsli.64         d24,d21,#36
+       vsli.64         d25,d21,#30
+       vsli.64         d26,d21,#25
+       vadd.i64        d27,d3
+       vorr            d30,d21,d23
+       vand            d29,d21,d23
+       veor            d20,d24,d25
+       vand            d30,d22
+       veor            d20,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d20,d27
+       vadd.i64        d16,d27
+       vadd.i64        d20,d30
+       vshr.u64        q12,q1,#19
+       vshr.u64        q13,q1,#61
+       vshr.u64        q15,q1,#6
+       vsli.64         q12,q1,#45
+       vext.8          q14,q2,q3,#8    @ X[i+1]
+       vsli.64         q13,q1,#3
+       veor            q15,q12
+       vshr.u64        q12,q14,#1
+       veor            q15,q13                         @ sigma1(X[i+14])
+       vshr.u64        q13,q14,#8
+       vadd.i64        q2,q15
+       vshr.u64        q15,q14,#7
+       vsli.64         q12,q14,#63
+       vsli.64         q13,q14,#56
+       vext.8          q14,q6,q7,#8    @ X[i+9]
+       veor            q15,q12
+       vshr.u64        d24,d16,#14             @ from NEON_00_15
+       vadd.i64        q2,q14
+       vshr.u64        d25,d16,#18             @ from NEON_00_15
+       veor            q15,q13                         @ sigma0(X[i+1])
+       vshr.u64        d26,d16,#41             @ from NEON_00_15
+       vadd.i64        q2,q15
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d16,#50
+       vsli.64         d25,d16,#46
+       vsli.64         d26,d16,#23
+#if 20<16 && defined(__ARMEL__)
+       vrev64.8        ,
+#endif
+       vadd.i64        d27,d28,d19
+       veor            d29,d17,d18
+       veor            d24,d25
+       vand            d29,d16
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d18                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d20,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d20,#34
+       vshr.u64        d26,d20,#39
+       vsli.64         d24,d20,#36
+       vsli.64         d25,d20,#30
+       vsli.64         d26,d20,#25
+       vadd.i64        d27,d4
+       vorr            d30,d20,d22
+       vand            d29,d20,d22
+       veor            d19,d24,d25
+       vand            d30,d21
+       veor            d19,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d19,d27
+       vadd.i64        d23,d27
+       vadd.i64        d19,d30
+       vshr.u64        d24,d23,#14     @ 21
+#if 21<16
+       vld1.64         {d5},[r1]!      @ handles unaligned
+#endif
+       vshr.u64        d25,d23,#18
+       vshr.u64        d26,d23,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d23,#50
+       vsli.64         d25,d23,#46
+       vsli.64         d26,d23,#23
+#if 21<16 && defined(__ARMEL__)
+       vrev64.8        ,
+#endif
+       vadd.i64        d27,d28,d18
+       veor            d29,d16,d17
+       veor            d24,d25
+       vand            d29,d23
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d17                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d19,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d19,#34
+       vshr.u64        d26,d19,#39
+       vsli.64         d24,d19,#36
+       vsli.64         d25,d19,#30
+       vsli.64         d26,d19,#25
+       vadd.i64        d27,d5
+       vorr            d30,d19,d21
+       vand            d29,d19,d21
+       veor            d18,d24,d25
+       vand            d30,d20
+       veor            d18,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d18,d27
+       vadd.i64        d22,d27
+       vadd.i64        d18,d30
+       vshr.u64        q12,q2,#19
+       vshr.u64        q13,q2,#61
+       vshr.u64        q15,q2,#6
+       vsli.64         q12,q2,#45
+       vext.8          q14,q3,q4,#8    @ X[i+1]
+       vsli.64         q13,q2,#3
+       veor            q15,q12
+       vshr.u64        q12,q14,#1
+       veor            q15,q13                         @ sigma1(X[i+14])
+       vshr.u64        q13,q14,#8
+       vadd.i64        q3,q15
+       vshr.u64        q15,q14,#7
+       vsli.64         q12,q14,#63
+       vsli.64         q13,q14,#56
+       vext.8          q14,q7,q0,#8    @ X[i+9]
+       veor            q15,q12
+       vshr.u64        d24,d22,#14             @ from NEON_00_15
+       vadd.i64        q3,q14
+       vshr.u64        d25,d22,#18             @ from NEON_00_15
+       veor            q15,q13                         @ sigma0(X[i+1])
+       vshr.u64        d26,d22,#41             @ from NEON_00_15
+       vadd.i64        q3,q15
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d22,#50
+       vsli.64         d25,d22,#46
+       vsli.64         d26,d22,#23
+#if 22<16 && defined(__ARMEL__)
+       vrev64.8        ,
+#endif
+       vadd.i64        d27,d28,d17
+       veor            d29,d23,d16
+       veor            d24,d25
+       vand            d29,d22
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d16                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d18,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d18,#34
+       vshr.u64        d26,d18,#39
+       vsli.64         d24,d18,#36
+       vsli.64         d25,d18,#30
+       vsli.64         d26,d18,#25
+       vadd.i64        d27,d6
+       vorr            d30,d18,d20
+       vand            d29,d18,d20
+       veor            d17,d24,d25
+       vand            d30,d19
+       veor            d17,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d17,d27
+       vadd.i64        d21,d27
+       vadd.i64        d17,d30
+       vshr.u64        d24,d21,#14     @ 23
+#if 23<16
+       vld1.64         {d7},[r1]!      @ handles unaligned
+#endif
+       vshr.u64        d25,d21,#18
+       vshr.u64        d26,d21,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d21,#50
+       vsli.64         d25,d21,#46
+       vsli.64         d26,d21,#23
+#if 23<16 && defined(__ARMEL__)
+       vrev64.8        ,
+#endif
+       vadd.i64        d27,d28,d16
+       veor            d29,d22,d23
+       veor            d24,d25
+       vand            d29,d21
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d23                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d17,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d17,#34
+       vshr.u64        d26,d17,#39
+       vsli.64         d24,d17,#36
+       vsli.64         d25,d17,#30
+       vsli.64         d26,d17,#25
+       vadd.i64        d27,d7
+       vorr            d30,d17,d19
+       vand            d29,d17,d19
+       veor            d16,d24,d25
+       vand            d30,d18
+       veor            d16,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d16,d27
+       vadd.i64        d20,d27
+       vadd.i64        d16,d30
+       vshr.u64        q12,q3,#19
+       vshr.u64        q13,q3,#61
+       vshr.u64        q15,q3,#6
+       vsli.64         q12,q3,#45
+       vext.8          q14,q4,q5,#8    @ X[i+1]
+       vsli.64         q13,q3,#3
+       veor            q15,q12
+       vshr.u64        q12,q14,#1
+       veor            q15,q13                         @ sigma1(X[i+14])
+       vshr.u64        q13,q14,#8
+       vadd.i64        q4,q15
+       vshr.u64        q15,q14,#7
+       vsli.64         q12,q14,#63
+       vsli.64         q13,q14,#56
+       vext.8          q14,q0,q1,#8    @ X[i+9]
+       veor            q15,q12
+       vshr.u64        d24,d20,#14             @ from NEON_00_15
+       vadd.i64        q4,q14
+       vshr.u64        d25,d20,#18             @ from NEON_00_15
+       veor            q15,q13                         @ sigma0(X[i+1])
+       vshr.u64        d26,d20,#41             @ from NEON_00_15
+       vadd.i64        q4,q15
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d20,#50
+       vsli.64         d25,d20,#46
+       vsli.64         d26,d20,#23
+#if 24<16 && defined(__ARMEL__)
+       vrev64.8        ,
+#endif
+       vadd.i64        d27,d28,d23
+       veor            d29,d21,d22
+       veor            d24,d25
+       vand            d29,d20
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d22                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d16,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d16,#34
+       vshr.u64        d26,d16,#39
+       vsli.64         d24,d16,#36
+       vsli.64         d25,d16,#30
+       vsli.64         d26,d16,#25
+       vadd.i64        d27,d8
+       vorr            d30,d16,d18
+       vand            d29,d16,d18
+       veor            d23,d24,d25
+       vand            d30,d17
+       veor            d23,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d23,d27
+       vadd.i64        d19,d27
+       vadd.i64        d23,d30
+       vshr.u64        d24,d19,#14     @ 25
+#if 25<16
+       vld1.64         {d9},[r1]!      @ handles unaligned
+#endif
+       vshr.u64        d25,d19,#18
+       vshr.u64        d26,d19,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d19,#50
+       vsli.64         d25,d19,#46
+       vsli.64         d26,d19,#23
+#if 25<16 && defined(__ARMEL__)
+       vrev64.8        ,
+#endif
+       vadd.i64        d27,d28,d22
+       veor            d29,d20,d21
+       veor            d24,d25
+       vand            d29,d19
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d21                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d23,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d23,#34
+       vshr.u64        d26,d23,#39
+       vsli.64         d24,d23,#36
+       vsli.64         d25,d23,#30
+       vsli.64         d26,d23,#25
+       vadd.i64        d27,d9
+       vorr            d30,d23,d17
+       vand            d29,d23,d17
+       veor            d22,d24,d25
+       vand            d30,d16
+       veor            d22,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d22,d27
+       vadd.i64        d18,d27
+       vadd.i64        d22,d30
+       vshr.u64        q12,q4,#19
+       vshr.u64        q13,q4,#61
+       vshr.u64        q15,q4,#6
+       vsli.64         q12,q4,#45
+       vext.8          q14,q5,q6,#8    @ X[i+1]
+       vsli.64         q13,q4,#3
+       veor            q15,q12
+       vshr.u64        q12,q14,#1
+       veor            q15,q13                         @ sigma1(X[i+14])
+       vshr.u64        q13,q14,#8
+       vadd.i64        q5,q15
+       vshr.u64        q15,q14,#7
+       vsli.64         q12,q14,#63
+       vsli.64         q13,q14,#56
+       vext.8          q14,q1,q2,#8    @ X[i+9]
+       veor            q15,q12
+       vshr.u64        d24,d18,#14             @ from NEON_00_15
+       vadd.i64        q5,q14
+       vshr.u64        d25,d18,#18             @ from NEON_00_15
+       veor            q15,q13                         @ sigma0(X[i+1])
+       vshr.u64        d26,d18,#41             @ from NEON_00_15
+       vadd.i64        q5,q15
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d18,#50
+       vsli.64         d25,d18,#46
+       vsli.64         d26,d18,#23
+#if 26<16 && defined(__ARMEL__)
+       vrev64.8        ,
+#endif
+       vadd.i64        d27,d28,d21
+       veor            d29,d19,d20
+       veor            d24,d25
+       vand            d29,d18
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d20                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d22,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d22,#34
+       vshr.u64        d26,d22,#39
+       vsli.64         d24,d22,#36
+       vsli.64         d25,d22,#30
+       vsli.64         d26,d22,#25
+       vadd.i64        d27,d10
+       vorr            d30,d22,d16
+       vand            d29,d22,d16
+       veor            d21,d24,d25
+       vand            d30,d23
+       veor            d21,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d21,d27
+       vadd.i64        d17,d27
+       vadd.i64        d21,d30
+       vshr.u64        d24,d17,#14     @ 27
+#if 27<16
+       vld1.64         {d11},[r1]!     @ handles unaligned
+#endif
+       vshr.u64        d25,d17,#18
+       vshr.u64        d26,d17,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d17,#50
+       vsli.64         d25,d17,#46
+       vsli.64         d26,d17,#23
+#if 27<16 && defined(__ARMEL__)
+       vrev64.8        ,
+#endif
+       vadd.i64        d27,d28,d20
+       veor            d29,d18,d19
+       veor            d24,d25
+       vand            d29,d17
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d19                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d21,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d21,#34
+       vshr.u64        d26,d21,#39
+       vsli.64         d24,d21,#36
+       vsli.64         d25,d21,#30
+       vsli.64         d26,d21,#25
+       vadd.i64        d27,d11
+       vorr            d30,d21,d23
+       vand            d29,d21,d23
+       veor            d20,d24,d25
+       vand            d30,d22
+       veor            d20,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d20,d27
+       vadd.i64        d16,d27
+       vadd.i64        d20,d30
+       vshr.u64        q12,q5,#19
+       vshr.u64        q13,q5,#61
+       vshr.u64        q15,q5,#6
+       vsli.64         q12,q5,#45
+       vext.8          q14,q6,q7,#8    @ X[i+1]
+       vsli.64         q13,q5,#3
+       veor            q15,q12
+       vshr.u64        q12,q14,#1
+       veor            q15,q13                         @ sigma1(X[i+14])
+       vshr.u64        q13,q14,#8
+       vadd.i64        q6,q15
+       vshr.u64        q15,q14,#7
+       vsli.64         q12,q14,#63
+       vsli.64         q13,q14,#56
+       vext.8          q14,q2,q3,#8    @ X[i+9]
+       veor            q15,q12
+       vshr.u64        d24,d16,#14             @ from NEON_00_15
+       vadd.i64        q6,q14
+       vshr.u64        d25,d16,#18             @ from NEON_00_15
+       veor            q15,q13                         @ sigma0(X[i+1])
+       vshr.u64        d26,d16,#41             @ from NEON_00_15
+       vadd.i64        q6,q15
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d16,#50
+       vsli.64         d25,d16,#46
+       vsli.64         d26,d16,#23
+#if 28<16 && defined(__ARMEL__)
+       vrev64.8        ,
+#endif
+       vadd.i64        d27,d28,d19
+       veor            d29,d17,d18
+       veor            d24,d25
+       vand            d29,d16
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d18                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d20,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d20,#34
+       vshr.u64        d26,d20,#39
+       vsli.64         d24,d20,#36
+       vsli.64         d25,d20,#30
+       vsli.64         d26,d20,#25
+       vadd.i64        d27,d12
+       vorr            d30,d20,d22
+       vand            d29,d20,d22
+       veor            d19,d24,d25
+       vand            d30,d21
+       veor            d19,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d19,d27
+       vadd.i64        d23,d27
+       vadd.i64        d19,d30
+       vshr.u64        d24,d23,#14     @ 29
+#if 29<16
+       vld1.64         {d13},[r1]!     @ handles unaligned
+#endif
+       vshr.u64        d25,d23,#18
+       vshr.u64        d26,d23,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d23,#50
+       vsli.64         d25,d23,#46
+       vsli.64         d26,d23,#23
+#if 29<16 && defined(__ARMEL__)
+       vrev64.8        ,
+#endif
+       vadd.i64        d27,d28,d18
+       veor            d29,d16,d17
+       veor            d24,d25
+       vand            d29,d23
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d17                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d19,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d19,#34
+       vshr.u64        d26,d19,#39
+       vsli.64         d24,d19,#36
+       vsli.64         d25,d19,#30
+       vsli.64         d26,d19,#25
+       vadd.i64        d27,d13
+       vorr            d30,d19,d21
+       vand            d29,d19,d21
+       veor            d18,d24,d25
+       vand            d30,d20
+       veor            d18,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d18,d27
+       vadd.i64        d22,d27
+       vadd.i64        d18,d30
+       vshr.u64        q12,q6,#19
+       vshr.u64        q13,q6,#61
+       vshr.u64        q15,q6,#6
+       vsli.64         q12,q6,#45
+       vext.8          q14,q7,q0,#8    @ X[i+1]
+       vsli.64         q13,q6,#3
+       veor            q15,q12
+       vshr.u64        q12,q14,#1
+       veor            q15,q13                         @ sigma1(X[i+14])
+       vshr.u64        q13,q14,#8
+       vadd.i64        q7,q15
+       vshr.u64        q15,q14,#7
+       vsli.64         q12,q14,#63
+       vsli.64         q13,q14,#56
+       vext.8          q14,q3,q4,#8    @ X[i+9]
+       veor            q15,q12
+       vshr.u64        d24,d22,#14             @ from NEON_00_15
+       vadd.i64        q7,q14
+       vshr.u64        d25,d22,#18             @ from NEON_00_15
+       veor            q15,q13                         @ sigma0(X[i+1])
+       vshr.u64        d26,d22,#41             @ from NEON_00_15
+       vadd.i64        q7,q15
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d22,#50
+       vsli.64         d25,d22,#46
+       vsli.64         d26,d22,#23
+#if 30<16 && defined(__ARMEL__)
+       vrev64.8        ,
+#endif
+       vadd.i64        d27,d28,d17
+       veor            d29,d23,d16
+       veor            d24,d25
+       vand            d29,d22
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d16                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d18,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d18,#34
+       vshr.u64        d26,d18,#39
+       vsli.64         d24,d18,#36
+       vsli.64         d25,d18,#30
+       vsli.64         d26,d18,#25
+       vadd.i64        d27,d14
+       vorr            d30,d18,d20
+       vand            d29,d18,d20
+       veor            d17,d24,d25
+       vand            d30,d19
+       veor            d17,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d17,d27
+       vadd.i64        d21,d27
+       vadd.i64        d17,d30
+       vshr.u64        d24,d21,#14     @ 31
+#if 31<16
+       vld1.64         {d15},[r1]!     @ handles unaligned
+#endif
+       vshr.u64        d25,d21,#18
+       vshr.u64        d26,d21,#41
+       vld1.64         {d28},[r3,:64]! @ K[i++]
+       vsli.64         d24,d21,#50
+       vsli.64         d25,d21,#46
+       vsli.64         d26,d21,#23
+#if 31<16 && defined(__ARMEL__)
+       vrev64.8        ,
+#endif
+       vadd.i64        d27,d28,d16
+       veor            d29,d22,d23
+       veor            d24,d25
+       vand            d29,d21
+       veor            d24,d26                 @ Sigma1(e)
+       veor            d29,d23                 @ Ch(e,f,g)
+       vadd.i64        d27,d24
+       vshr.u64        d24,d17,#28
+       vadd.i64        d27,d29
+       vshr.u64        d25,d17,#34
+       vshr.u64        d26,d17,#39
+       vsli.64         d24,d17,#36
+       vsli.64         d25,d17,#30
+       vsli.64         d26,d17,#25
+       vadd.i64        d27,d15
+       vorr            d30,d17,d19
+       vand            d29,d17,d19
+       veor            d16,d24,d25
+       vand            d30,d18
+       veor            d16,d26                 @ Sigma0(a)
+       vorr            d30,d29         @ Maj(a,b,c)
+       vadd.i64        d16,d27
+       vadd.i64        d20,d27
+       vadd.i64        d16,d30
+       bne             .L16_79_neon
+
+       vldmia          r0,{d24-d31}    @ load context to temp
+       vadd.i64        q8,q12          @ vectorized accumulate
+       vadd.i64        q9,q13
+       vadd.i64        q10,q14
+       vadd.i64        q11,q15
+       vstmia          r0,{d16-d23}    @ save context
+       teq             r1,r2
+       sub             r3,#640 @ rewind K512
+       bne             .Loop_neon
+
+       vldmia  sp!,{d8-d15}            @ epilogue
+       .word   0xe12fff1e              @ "bx lr" return, encoded for pre-ARMv5 assemblers
+#endif
+.size  sha512_block_data_order,.-sha512_block_data_order
+.asciz "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
+.align 2
+.comm  OPENSSL_armcap_P,4,4
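
For reference while reading the generated NEON rounds above: each vshr.u64/vsli.64 pair whose shift counts sum to 64 (e.g. #14/#50, #18/#46, #41/#23) builds a 64-bit rotate, so the d24/d25/d26 triples compute the standard FIPS 180-4 SHA-512 functions. A minimal C restatement follows; it is illustrative only and not part of this change (ror64 and the helper names are shorthand for this sketch):

    #include <stdint.h>

    /* SHA-512 round primitives as computed by the NEON code above. */
    static inline uint64_t ror64(uint64_t x, unsigned n) { return (x >> n) | (x << (64 - n)); }

    static inline uint64_t Sigma0(uint64_t a) { return ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39); }
    static inline uint64_t Sigma1(uint64_t e) { return ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41); }
    static inline uint64_t sigma0(uint64_t x) { return ror64(x, 1)  ^ ror64(x, 8)  ^ (x >> 7); }
    static inline uint64_t sigma1(uint64_t x) { return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6); }

    /* Ch and Maj in the exact forms the assembly uses. */
    static inline uint64_t Ch(uint64_t e, uint64_t f, uint64_t g)  { return ((f ^ g) & e) ^ g; }
    static inline uint64_t Maj(uint64_t a, uint64_t b, uint64_t c) { return ((a | c) & b) | (a & c); }

    /* The .L16_79_neon loop extends the message schedule two lanes at a time:
     * W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16].             */

The q-register block at the top of each .L16_79_neon iteration is exactly that schedule update, vectorized over two consecutive W words.
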
diff --git a/deps/openssl/openssl.gyp b/deps/openssl/openssl.gyp
index b3ae004..5c654c9 100644 (file)
         ['exclude', 'store/.*$']
       ],
       'conditions': [
-        ['target_arch!="ia32" and target_arch!="x64"', {
+        ['target_arch!="ia32" and target_arch!="x64" and target_arch!="arm"', {
           # Disable asm
           'defines': [
             'OPENSSL_NO_ASM'
           # Enable asm
           'defines': [
             'AES_ASM',
-            'VPAES_ASM',
-            'BF_ASM',
-            'BNCO_ASM',
-            'BN_ASM',
             'CPUID_ASM',
-            'DES_ASM',
-            'LIB_BN_ASM',
-            'MD5_ASM',
-            'OPENSSL_BN_ASM',
             'OPENSSL_BN_ASM_MONT',
             'OPENSSL_CPUID_OBJ',
-            'RIP_ASM',
-            'RMD160_ASM',
             'SHA1_ASM',
             'SHA256_ASM',
             'SHA512_ASM',
             'GHASH_ASM',
-            'WHIRLPOOL_ASM',
-            'WP_ASM'
           ],
           'conditions': [
+            # Extended assembly on non-arm platforms
+            ['target_arch!="arm"', {
+              'defines': [
+                'VPAES_ASM',
+                'BN_ASM',
+                'BF_ASM',
+                'BNCO_ASM',
+                'DES_ASM',
+                'LIB_BN_ASM',
+                'MD5_ASM',
+                'OPENSSL_BN_ASM',
+                'RIP_ASM',
+                'RMD160_ASM',
+                'WHIRLPOOL_ASM',
+                'WP_ASM',
+              ],
+            }],
             ['OS!="win" and OS!="mac" and target_arch=="ia32"', {
               'sources': [
                 'asm/x86-elf-gas/aes/aes-586.s',
                 'openssl/crypto/des/fcrypt_b.c'
               ]
             }],
+            ['target_arch=="arm"', {
+              'sources': [
+                'asm/arm-elf-gas/aes/aes-armv4.S',
+                'asm/arm-elf-gas/bn/armv4-mont.S',
+                'asm/arm-elf-gas/bn/armv4-gf2m.S',
+                'asm/arm-elf-gas/sha/sha1-armv4-large.S',
+                'asm/arm-elf-gas/sha/sha512-armv4.S',
+                'asm/arm-elf-gas/sha/sha256-armv4.S',
+                'asm/arm-elf-gas/modes/ghash-armv4.S',
+                # No asm available
+                'openssl/crypto/aes/aes_cbc.c',
+                'openssl/crypto/bf/bf_enc.c',
+                'openssl/crypto/bn/bn_asm.c',
+                'openssl/crypto/cast/c_enc.c',
+                'openssl/crypto/camellia/camellia.c',
+                'openssl/crypto/camellia/cmll_cbc.c',
+                'openssl/crypto/camellia/cmll_misc.c',
+                'openssl/crypto/des/des_enc.c',
+                'openssl/crypto/des/fcrypt_b.c',
+                'openssl/crypto/rc4/rc4_enc.c',
+                'openssl/crypto/rc4/rc4_skey.c',
+                'openssl/crypto/whrlpool/wp_block.c',
+                # ARM CPU capability detection (sets OPENSSL_armcap_P)
+                'openssl/crypto/armcap.c',
+                'openssl/crypto/armv4cpuid.S',
+              ]
+            }],
             ['OS=="win" and target_arch=="ia32"', {
               'sources': [
                 'asm/x86-win32-masm/aes/aes-586.asm',
             'HAVE_DLFCN_H'
           ],
         }],
-        ['target_arch=="arm"', {
-          'sources': ['openssl/crypto/armcap.c'],
-        }],
       ],
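
The ARM branch above also compiles openssl/crypto/armcap.c and armv4cpuid.S because the generated assembly chooses its fast paths at run time: armcap.c probes the CPU once and records the result in the OPENSSL_armcap_P word (declared with .comm at the end of sha512-armv4.S above), and the .S entry points test it before branching into their NEON loops. A rough sketch of that probe in C, under stated assumptions: the flag bit position is assumed for illustration, OPENSSL_cpuid_setup_sketch is a made-up name, and only OPENSSL_armcap_P and _armv7_neon_probe correspond to symbols in the files this change adds:

    #include <setjmp.h>
    #include <signal.h>
    #include <string.h>

    unsigned int OPENSSL_armcap_P = 0;            /* read by the generated .S files       */
    #define ARMV7_NEON (1u << 0)                  /* assumed bit position for this sketch */

    extern void _armv7_neon_probe(void);          /* one NEON insn, from armv4cpuid.S     */

    static sigjmp_buf ill_jmp;
    static void ill_handler(int sig) { (void)sig; siglongjmp(ill_jmp, 1); }

    /* Hypothetical setup routine mirroring what armcap.c does at startup. */
    void OPENSSL_cpuid_setup_sketch(void)
    {
        struct sigaction act, oact;
        memset(&act, 0, sizeof(act));
        act.sa_handler = ill_handler;
        sigaction(SIGILL, &act, &oact);

        if (sigsetjmp(ill_jmp, 1) == 0) {
            _armv7_neon_probe();                  /* raises SIGILL on non-NEON CPUs */
            OPENSSL_armcap_P |= ARMV7_NEON;
        }
        sigaction(SIGILL, &oact, NULL);
    }

If the probe faults, the flag stays clear and sha512_block_data_order falls through to the scalar ARMv4 code, so the same binary runs on both NEON and pre-NEON hardware.
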
       'include_dirs': [
         '.',