author    Matthew Waters <matthew@centricular.com>  2018-04-11 15:04:59 +1000
committer Matthew Waters <matthew@centricular.com>  2018-04-11 15:08:12 +1000
commit    002ddaffbe776fa12ade82242bc2355bc77c4f19
tree      2ed3b992e4f7f7e0127ba8419d92402a50b0ec30 /recipes/gnutls
parent    c145a01483aeb1acea58e1cf108b1e272ed8dfef
recipes/gnutls: also rename assembly functions on macos/ios
Fixes build error:

Undefined symbols for architecture x86_64:
  "__aesni_cbc_encrypt", referenced from:
      _aes_encrypt in libaccelerated.a(aes-cbc-x86-aesni.o)
      _aes_decrypt in libaccelerated.a(aes-cbc-x86-aesni.o)
  "__aesni_ctr32_encrypt_blocks", referenced from:
      _aes_gcm_encrypt in libaccelerated.a(aes-gcm-x86-pclmul-avx.o)
      _aes_gcm_decrypt in libaccelerated.a(aes-gcm-x86-pclmul-avx.o)
      _aes_gcm_encrypt in libaccelerated.a(aes-gcm-x86-pclmul.o)
      _aes_gcm_decrypt in libaccelerated.a(aes-gcm-x86-pclmul.o)
  "__aesni_ecb_encrypt", referenced from:
      _x86_aes_encrypt in libaccelerated.a(aes-ccm-x86-aesni.o)
      _x86_aes_encrypt in libaccelerated.a(aes-gcm-x86-aesni.o)
      _aes_gcm_cipher_setkey in libaccelerated.a(aes-gcm-x86-pclmul-avx.o)
      _aes_gcm_setiv in libaccelerated.a(aes-gcm-x86-pclmul-avx.o)
      _aes_gcm_cipher_setkey in libaccelerated.a(aes-gcm-x86-pclmul.o)
      _aes_gcm_setiv in libaccelerated.a(aes-gcm-x86-pclmul.o)
  "__aesni_set_decrypt_key", referenced from:
      _aes_cipher_setkey in libaccelerated.a(aes-cbc-x86-aesni.o)
  "__aesni_set_encrypt_key", referenced from:
      _aes_cipher_setkey in libaccelerated.a(aes-cbc-x86-aesni.o)
      _aes_ccm_cipher_setkey in libaccelerated.a(aes-ccm-x86-aesni.o)
      _aes_gcm_cipher_setkey in libaccelerated.a(aes-gcm-x86-aesni.o)
      _aes_gcm_cipher_setkey in libaccelerated.a(aes-gcm-x86-pclmul-avx.o)
      _aes_gcm_cipher_setkey in libaccelerated.a(aes-gcm-x86-pclmul.o)
ld: symbol(s) not found for architecture x86_64
clang: error: linker command failed with exit code 1 (use -v to see invocation)
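For context: the failure comes from the Mach-O leading-underscore convention. The C side of the patch already calls the renamed `_aesni_*` functions, which the toolchain emits as `__aesni_*` symbols, while the macOS assembly files still defined the old single-underscore labels. A minimal, illustrative C sketch of that name-mangling rule follows (not gnutls source; `checksum` and `_accel_checksum` are made-up placeholder names):

```c
/* Illustrative sketch only -- not gnutls source code.
 * On Mach-O (macOS/iOS) every external C symbol is emitted with a leading
 * underscore, so an assembly file must define a label with one more
 * underscore than the C-level name for the call to link. */
int checksum(void)        { return 42; }  /* Mach-O symbol: _checksum        */
int _accel_checksum(void) { return 7;  }  /* Mach-O symbol: __accel_checksum */

/* The patched C code calls the renamed "_aesni_cbc_encrypt", so the linker
 * looks for "__aesni_cbc_encrypt" -- a label the macosx/*.s files only
 * provide after the rename in the diff below. */
```

Once the macosx/*.s labels carry the extra underscore, as in the diff below, `nm libaccelerated.a` should no longer list the `__aesni_*` symbols as undefined.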
Diffstat (limited to 'recipes/gnutls')
-rw-r--r--  recipes/gnutls/0001-asm-rename-some-assembly-functions-to-not-conflict-w.patch | 1108
1 file changed, 1099 insertions(+), 9 deletions(-)
diff --git a/recipes/gnutls/0001-asm-rename-some-assembly-functions-to-not-conflict-w.patch b/recipes/gnutls/0001-asm-rename-some-assembly-functions-to-not-conflict-w.patch
index c906a218..c1930699 100644
--- a/recipes/gnutls/0001-asm-rename-some-assembly-functions-to-not-conflict-w.patch
+++ b/recipes/gnutls/0001-asm-rename-some-assembly-functions-to-not-conflict-w.patch
@@ -1,4 +1,4 @@
-From 693567fbc472d157f29b0f5b90eb6831f4ae4986 Mon Sep 17 00:00:00 2001
+From f17e88c7eaaa58ce1e3f4f052b03ea2811e04533 Mon Sep 17 00:00:00 2001
From: Matthew Waters <matthew@centricular.com>
Date: Tue, 27 Mar 2018 19:10:26 +1100
Subject: [PATCH] asm: rename some assembly functions to not conflict with
@@ -30,10 +30,14 @@ components would produce duplicate symbols and fail to link.
lib/accelerated/x86/elf/sha256-ssse3-x86.s | 8 +-
lib/accelerated/x86/elf/sha512-ssse3-x86.s | 8 +-
lib/accelerated/x86/elf/sha512-ssse3-x86_64.s | 14 +-
- lib/accelerated/x86/macosx/sha1-ssse3-x86_64.s | 2 +-
- lib/accelerated/x86/macosx/sha512-ssse3-x86_64.s | 2 +-
+ lib/accelerated/x86/macosx/aesni-x86.s | 152 +++++++++---------
+ lib/accelerated/x86/macosx/aesni-x86_64.s | 124 +++++++--------
+ lib/accelerated/x86/macosx/sha1-ssse3-x86.s | 6 +-
+ lib/accelerated/x86/macosx/sha1-ssse3-x86_64.s | 6 +-
+ lib/accelerated/x86/macosx/sha256-ssse3-x86.s | 6 +-
+ lib/accelerated/x86/macosx/sha512-ssse3-x86_64.s | 6 +-
lib/accelerated/x86/sha-x86-ssse3.c | 12 +-
- 26 files changed, 316 insertions(+), 316 deletions(-)
+ 30 files changed, 464 insertions(+), 464 deletions(-)
diff --git a/lib/accelerated/aarch64/elf/sha1-armv8.s b/lib/accelerated/aarch64/elf/sha1-armv8.s
index 0dc68fe..d24ac5c 100644
@@ -1799,29 +1803,1115 @@ index d51d816..f2a9b57 100644
.section .note.GNU-stack,"",%progbits
+diff --git a/lib/accelerated/x86/macosx/aesni-x86.s b/lib/accelerated/x86/macosx/aesni-x86.s
+index 09ca1cb..d9bde6e 100644
+--- a/lib/accelerated/x86/macosx/aesni-x86.s
++++ b/lib/accelerated/x86/macosx/aesni-x86.s
+@@ -39,10 +39,10 @@
+ #
+ .file "devel/perlasm/aesni-x86.s"
+ .text
+-.globl _aesni_encrypt
++.globl __aesni_encrypt
+ .align 4
+-_aesni_encrypt:
+-L_aesni_encrypt_begin:
++__aesni_encrypt:
++L__aesni_encrypt_begin:
+ movl 4(%esp),%eax
+ movl 12(%esp),%edx
+ movups (%eax),%xmm2
+@@ -61,10 +61,10 @@ L000enc1_loop_1:
+ .byte 102,15,56,221,209
+ movups %xmm2,(%eax)
+ ret
+-.globl _aesni_decrypt
++.globl __aesni_decrypt
+ .align 4
+-_aesni_decrypt:
+-L_aesni_decrypt_begin:
++__aesni_decrypt:
++L__aesni_decrypt_begin:
+ movl 4(%esp),%eax
+ movl 12(%esp),%edx
+ movups (%eax),%xmm2
+@@ -84,7 +84,7 @@ L001dec1_loop_2:
+ movups %xmm2,(%eax)
+ ret
+ .align 4
+-__aesni_encrypt3:
++___aesni_encrypt3:
+ movups (%edx),%xmm0
+ shrl $1,%ecx
+ movups 16(%edx),%xmm1
+@@ -113,7 +113,7 @@ L002enc3_loop:
+ .byte 102,15,56,221,224
+ ret
+ .align 4
+-__aesni_decrypt3:
++___aesni_decrypt3:
+ movups (%edx),%xmm0
+ shrl $1,%ecx
+ movups 16(%edx),%xmm1
+@@ -142,7 +142,7 @@ L003dec3_loop:
+ .byte 102,15,56,223,224
+ ret
+ .align 4
+-__aesni_encrypt4:
++___aesni_encrypt4:
+ movups (%edx),%xmm0
+ movups 16(%edx),%xmm1
+ shrl $1,%ecx
+@@ -176,7 +176,7 @@ L004enc4_loop:
+ .byte 102,15,56,221,232
+ ret
+ .align 4
+-__aesni_decrypt4:
++___aesni_decrypt4:
+ movups (%edx),%xmm0
+ movups 16(%edx),%xmm1
+ shrl $1,%ecx
+@@ -210,7 +210,7 @@ L005dec4_loop:
+ .byte 102,15,56,223,232
+ ret
+ .align 4
+-__aesni_encrypt6:
++___aesni_encrypt6:
+ movups (%edx),%xmm0
+ shrl $1,%ecx
+ movups 16(%edx),%xmm1
+@@ -229,7 +229,7 @@ __aesni_encrypt6:
+ .byte 102,15,56,220,241
+ movups (%edx),%xmm0
+ .byte 102,15,56,220,249
+- jmp L_aesni_encrypt6_enter
++ jmp L__aesni_encrypt6_enter
+ .align 4,0x90
+ L006enc6_loop:
+ .byte 102,15,56,220,209
+@@ -240,7 +240,7 @@ L006enc6_loop:
+ .byte 102,15,56,220,241
+ .byte 102,15,56,220,249
+ .align 4,0x90
+-L_aesni_encrypt6_enter:
++L__aesni_encrypt6_enter:
+ movups 16(%edx),%xmm1
+ .byte 102,15,56,220,208
+ .byte 102,15,56,220,216
+@@ -265,7 +265,7 @@ L_aesni_encrypt6_enter:
+ .byte 102,15,56,221,248
+ ret
+ .align 4
+-__aesni_decrypt6:
++___aesni_decrypt6:
+ movups (%edx),%xmm0
+ shrl $1,%ecx
+ movups 16(%edx),%xmm1
+@@ -284,7 +284,7 @@ __aesni_decrypt6:
+ .byte 102,15,56,222,241
+ movups (%edx),%xmm0
+ .byte 102,15,56,222,249
+- jmp L_aesni_decrypt6_enter
++ jmp L__aesni_decrypt6_enter
+ .align 4,0x90
+ L007dec6_loop:
+ .byte 102,15,56,222,209
+@@ -295,7 +295,7 @@ L007dec6_loop:
+ .byte 102,15,56,222,241
+ .byte 102,15,56,222,249
+ .align 4,0x90
+-L_aesni_decrypt6_enter:
++L__aesni_decrypt6_enter:
+ movups 16(%edx),%xmm1
+ .byte 102,15,56,222,208
+ .byte 102,15,56,222,216
+@@ -319,10 +319,10 @@ L_aesni_decrypt6_enter:
+ .byte 102,15,56,223,240
+ .byte 102,15,56,223,248
+ ret
+-.globl _aesni_ecb_encrypt
++.globl __aesni_ecb_encrypt
+ .align 4
+-_aesni_ecb_encrypt:
+-L_aesni_ecb_encrypt_begin:
++__aesni_ecb_encrypt:
++L__aesni_ecb_encrypt_begin:
+ pushl %ebp
+ pushl %ebx
+ pushl %esi
+@@ -367,7 +367,7 @@ L012ecb_enc_loop6:
+ movdqu 80(%esi),%xmm7
+ leal 96(%esi),%esi
+ L011ecb_enc_loop6_enter:
+- call __aesni_encrypt6
++ call ___aesni_encrypt6
+ movl %ebp,%edx
+ movl %ebx,%ecx
+ subl $96,%eax
+@@ -394,7 +394,7 @@ L010ecb_enc_tail:
+ je L016ecb_enc_four
+ movups 64(%esi),%xmm6
+ xorps %xmm7,%xmm7
+- call __aesni_encrypt6
++ call ___aesni_encrypt6
+ movups %xmm2,(%edi)
+ movups %xmm3,16(%edi)
+ movups %xmm4,32(%edi)
+@@ -419,20 +419,20 @@ L017enc1_loop_3:
+ .align 4,0x90
+ L014ecb_enc_two:
+ xorps %xmm4,%xmm4
+- call __aesni_encrypt3
++ call ___aesni_encrypt3
+ movups %xmm2,(%edi)
+ movups %xmm3,16(%edi)
+ jmp L008ecb_ret
+ .align 4,0x90
+ L015ecb_enc_three:
+- call __aesni_encrypt3
++ call ___aesni_encrypt3
+ movups %xmm2,(%edi)
+ movups %xmm3,16(%edi)
+ movups %xmm4,32(%edi)
+ jmp L008ecb_ret
+ .align 4,0x90
+ L016ecb_enc_four:
+- call __aesni_encrypt4
++ call ___aesni_encrypt4
+ movups %xmm2,(%edi)
+ movups %xmm3,16(%edi)
+ movups %xmm4,32(%edi)
+@@ -470,7 +470,7 @@ L020ecb_dec_loop6:
+ movdqu 80(%esi),%xmm7
+ leal 96(%esi),%esi
+ L019ecb_dec_loop6_enter:
+- call __aesni_decrypt6
++ call ___aesni_decrypt6
+ movl %ebp,%edx
+ movl %ebx,%ecx
+ subl $96,%eax
+@@ -497,7 +497,7 @@ L018ecb_dec_tail:
+ je L024ecb_dec_four
+ movups 64(%esi),%xmm6
+ xorps %xmm7,%xmm7
+- call __aesni_decrypt6
++ call ___aesni_decrypt6
+ movups %xmm2,(%edi)
+ movups %xmm3,16(%edi)
+ movups %xmm4,32(%edi)
+@@ -522,20 +522,20 @@ L025dec1_loop_4:
+ .align 4,0x90
+ L022ecb_dec_two:
+ xorps %xmm4,%xmm4
+- call __aesni_decrypt3
++ call ___aesni_decrypt3
+ movups %xmm2,(%edi)
+ movups %xmm3,16(%edi)
+ jmp L008ecb_ret
+ .align 4,0x90
+ L023ecb_dec_three:
+- call __aesni_decrypt3
++ call ___aesni_decrypt3
+ movups %xmm2,(%edi)
+ movups %xmm3,16(%edi)
+ movups %xmm4,32(%edi)
+ jmp L008ecb_ret
+ .align 4,0x90
+ L024ecb_dec_four:
+- call __aesni_decrypt4
++ call ___aesni_decrypt4
+ movups %xmm2,(%edi)
+ movups %xmm3,16(%edi)
+ movups %xmm4,32(%edi)
+@@ -546,10 +546,10 @@ L008ecb_ret:
+ popl %ebx
+ popl %ebp
+ ret
+-.globl _aesni_ccm64_encrypt_blocks
++.globl __aesni_ccm64_encrypt_blocks
+ .align 4
+-_aesni_ccm64_encrypt_blocks:
+-L_aesni_ccm64_encrypt_blocks_begin:
++__aesni_ccm64_encrypt_blocks:
++L__aesni_ccm64_encrypt_blocks_begin:
+ pushl %ebp
+ pushl %ebx
+ pushl %esi
+@@ -624,10 +624,10 @@ L027ccm64_enc2_loop:
+ popl %ebx
+ popl %ebp
+ ret
+-.globl _aesni_ccm64_decrypt_blocks
++.globl __aesni_ccm64_decrypt_blocks
+ .align 4
+-_aesni_ccm64_decrypt_blocks:
+-L_aesni_ccm64_decrypt_blocks_begin:
++__aesni_ccm64_decrypt_blocks:
++L__aesni_ccm64_decrypt_blocks_begin:
+ pushl %ebp
+ pushl %ebx
+ pushl %esi
+@@ -734,10 +734,10 @@ L032enc1_loop_6:
+ popl %ebx
+ popl %ebp
+ ret
+-.globl _aesni_ctr32_encrypt_blocks
++.globl __aesni_ctr32_encrypt_blocks
+ .align 4
+-_aesni_ctr32_encrypt_blocks:
+-L_aesni_ctr32_encrypt_blocks_begin:
++__aesni_ctr32_encrypt_blocks:
++L__aesni_ctr32_encrypt_blocks_begin:
+ pushl %ebp
+ pushl %ebx
+ pushl %esi
+@@ -826,7 +826,7 @@ L035ctr32_loop6:
+ .byte 102,15,56,220,241
+ movups (%edx),%xmm0
+ .byte 102,15,56,220,249
+- call L_aesni_encrypt6_enter
++ call L__aesni_encrypt6_enter
+ movups (%esi),%xmm1
+ movups 16(%esi),%xmm0
+ xorps %xmm1,%xmm2
+@@ -881,7 +881,7 @@ L034ctr32_tail:
+ por %xmm7,%xmm5
+ je L040ctr32_four
+ por %xmm7,%xmm6
+- call __aesni_encrypt6
++ call ___aesni_encrypt6
+ movups (%esi),%xmm1
+ movups 16(%esi),%xmm0
+ xorps %xmm1,%xmm2
+@@ -920,7 +920,7 @@ L041enc1_loop_7:
+ jmp L036ctr32_ret
+ .align 4,0x90
+ L038ctr32_two:
+- call __aesni_encrypt3
++ call ___aesni_encrypt3
+ movups (%esi),%xmm5
+ movups 16(%esi),%xmm6
+ xorps %xmm5,%xmm2
+@@ -930,7 +930,7 @@ L038ctr32_two:
+ jmp L036ctr32_ret
+ .align 4,0x90
+ L039ctr32_three:
+- call __aesni_encrypt3
++ call ___aesni_encrypt3
+ movups (%esi),%xmm5
+ movups 16(%esi),%xmm6
+ xorps %xmm5,%xmm2
+@@ -943,7 +943,7 @@ L039ctr32_three:
+ jmp L036ctr32_ret
+ .align 4,0x90
+ L040ctr32_four:
+- call __aesni_encrypt4
++ call ___aesni_encrypt4
+ movups (%esi),%xmm6
+ movups 16(%esi),%xmm7
+ movups 32(%esi),%xmm1
+@@ -963,10 +963,10 @@ L036ctr32_ret:
+ popl %ebx
+ popl %ebp
+ ret
+-.globl _aesni_xts_encrypt
++.globl __aesni_xts_encrypt
+ .align 4
+-_aesni_xts_encrypt:
+-L_aesni_xts_encrypt_begin:
++__aesni_xts_encrypt:
++L__aesni_xts_encrypt_begin:
+ pushl %ebp
+ pushl %ebx
+ pushl %esi
+@@ -1078,7 +1078,7 @@ L044xts_enc_loop6:
+ .byte 102,15,56,220,241
+ movups (%edx),%xmm0
+ .byte 102,15,56,220,249
+- call L_aesni_encrypt6_enter
++ call L__aesni_encrypt6_enter
+ movdqa 80(%esp),%xmm1
+ pxor %xmm0,%xmm0
+ xorps (%esp),%xmm2
+@@ -1158,7 +1158,7 @@ L043xts_enc_short:
+ pxor 48(%esp),%xmm5
+ movdqa %xmm7,64(%esp)
+ pxor %xmm7,%xmm6
+- call __aesni_encrypt6
++ call ___aesni_encrypt6
+ movaps 64(%esp),%xmm1
+ xorps (%esp),%xmm2
+ xorps 16(%esp),%xmm3
+@@ -1202,7 +1202,7 @@ L047xts_enc_two:
+ xorps %xmm5,%xmm2
+ xorps %xmm6,%xmm3
+ xorps %xmm4,%xmm4
+- call __aesni_encrypt3
++ call ___aesni_encrypt3
+ xorps %xmm5,%xmm2
+ xorps %xmm6,%xmm3
+ movups %xmm2,(%edi)
+@@ -1220,7 +1220,7 @@ L048xts_enc_three:
+ xorps %xmm5,%xmm2
+ xorps %xmm6,%xmm3
+ xorps %xmm7,%xmm4
+- call __aesni_encrypt3
++ call ___aesni_encrypt3
+ xorps %xmm5,%xmm2
+ xorps %xmm6,%xmm3
+ xorps %xmm7,%xmm4
+@@ -1242,7 +1242,7 @@ L049xts_enc_four:
+ xorps 16(%esp),%xmm3
+ xorps %xmm7,%xmm4
+ xorps %xmm6,%xmm5
+- call __aesni_encrypt4
++ call ___aesni_encrypt4
+ xorps (%esp),%xmm2
+ xorps 16(%esp),%xmm3
+ xorps %xmm7,%xmm4
+@@ -1308,10 +1308,10 @@ L052xts_enc_ret:
+ popl %ebx
+ popl %ebp
+ ret
+-.globl _aesni_xts_decrypt
++.globl __aesni_xts_decrypt
+ .align 4
+-_aesni_xts_decrypt:
+-L_aesni_xts_decrypt_begin:
++__aesni_xts_decrypt:
++L__aesni_xts_decrypt_begin:
+ pushl %ebp
+ pushl %ebx
+ pushl %esi
+@@ -1428,7 +1428,7 @@ L057xts_dec_loop6:
+ .byte 102,15,56,222,241
+ movups (%edx),%xmm0
+ .byte 102,15,56,222,249
+- call L_aesni_decrypt6_enter
++ call L__aesni_decrypt6_enter
+ movdqa 80(%esp),%xmm1
+ pxor %xmm0,%xmm0
+ xorps (%esp),%xmm2
+@@ -1508,7 +1508,7 @@ L056xts_dec_short:
+ pxor 48(%esp),%xmm5
+ movdqa %xmm7,64(%esp)
+ pxor %xmm7,%xmm6
+- call __aesni_decrypt6
++ call ___aesni_decrypt6
+ movaps 64(%esp),%xmm1
+ xorps (%esp),%xmm2
+ xorps 16(%esp),%xmm3
+@@ -1551,7 +1551,7 @@ L060xts_dec_two:
+ leal 32(%esi),%esi
+ xorps %xmm5,%xmm2
+ xorps %xmm6,%xmm3
+- call __aesni_decrypt3
++ call ___aesni_decrypt3
+ xorps %xmm5,%xmm2
+ xorps %xmm6,%xmm3
+ movups %xmm2,(%edi)
+@@ -1569,7 +1569,7 @@ L061xts_dec_three:
+ xorps %xmm5,%xmm2
+ xorps %xmm6,%xmm3
+ xorps %xmm7,%xmm4
+- call __aesni_decrypt3
++ call ___aesni_decrypt3
+ xorps %xmm5,%xmm2
+ xorps %xmm6,%xmm3
+ xorps %xmm7,%xmm4
+@@ -1591,7 +1591,7 @@ L062xts_dec_four:
+ xorps 16(%esp),%xmm3
+ xorps %xmm7,%xmm4
+ xorps %xmm6,%xmm5
+- call __aesni_decrypt4
++ call ___aesni_decrypt4
+ xorps (%esp),%xmm2
+ xorps 16(%esp),%xmm3
+ xorps %xmm7,%xmm4
+@@ -1682,10 +1682,10 @@ L065xts_dec_ret:
+ popl %ebx
+ popl %ebp
+ ret
+-.globl _aesni_cbc_encrypt
++.globl __aesni_cbc_encrypt
+ .align 4
+-_aesni_cbc_encrypt:
+-L_aesni_cbc_encrypt_begin:
++__aesni_cbc_encrypt:
++L__aesni_cbc_encrypt_begin:
+ pushl %ebp
+ pushl %ebx
+ pushl %esi
+@@ -1770,7 +1770,7 @@ L077cbc_dec_loop6_enter:
+ movdqu 48(%esi),%xmm5
+ movdqu 64(%esi),%xmm6
+ movdqu 80(%esi),%xmm7
+- call __aesni_decrypt6
++ call ___aesni_decrypt6
+ movups (%esi),%xmm1
+ movups 16(%esi),%xmm0
+ xorps (%esp),%xmm2
+@@ -1819,7 +1819,7 @@ L076cbc_dec_tail:
+ movaps %xmm7,(%esp)
+ movups (%esi),%xmm2
+ xorps %xmm7,%xmm7
+- call __aesni_decrypt6
++ call ___aesni_decrypt6
+ movups (%esi),%xmm1
+ movups 16(%esi),%xmm0
+ xorps (%esp),%xmm2
+@@ -1858,7 +1858,7 @@ L084dec1_loop_16:
+ .align 4,0x90
+ L081cbc_dec_two:
+ xorps %xmm4,%xmm4
+- call __aesni_decrypt3
++ call ___aesni_decrypt3
+ xorps %xmm7,%xmm2
+ xorps %xmm6,%xmm3
+ movups %xmm2,(%edi)
+@@ -1869,7 +1869,7 @@ L081cbc_dec_two:
+ jmp L079cbc_dec_tail_collected
+ .align 4,0x90
+ L082cbc_dec_three:
+- call __aesni_decrypt3
++ call ___aesni_decrypt3
+ xorps %xmm7,%xmm2
+ xorps %xmm6,%xmm3
+ xorps %xmm5,%xmm4
+@@ -1882,7 +1882,7 @@ L082cbc_dec_three:
+ jmp L079cbc_dec_tail_collected
+ .align 4,0x90
+ L083cbc_dec_four:
+- call __aesni_decrypt4
++ call ___aesni_decrypt4
+ movups 16(%esi),%xmm1
+ movups 32(%esi),%xmm0
+ xorps %xmm7,%xmm2
+@@ -1919,7 +1919,7 @@ L070cbc_abort:
+ popl %ebp
+ ret
+ .align 4
+-__aesni_set_encrypt_key:
++___aesni_set_encrypt_key:
+ testl %eax,%eax
+ jz L086bad_pointer
+ testl %edx,%edx
+@@ -2095,23 +2095,23 @@ L086bad_pointer:
+ L089bad_keybits:
+ movl $-2,%eax
+ ret
+-.globl _aesni_set_encrypt_key
++.globl __aesni_set_encrypt_key
+ .align 4
+-_aesni_set_encrypt_key:
+-L_aesni_set_encrypt_key_begin:
++__aesni_set_encrypt_key:
++L__aesni_set_encrypt_key_begin:
+ movl 4(%esp),%eax
+ movl 8(%esp),%ecx
+ movl 12(%esp),%edx
+- call __aesni_set_encrypt_key
++ call ___aesni_set_encrypt_key
+ ret
+-.globl _aesni_set_decrypt_key
++.globl __aesni_set_decrypt_key
+ .align 4
+-_aesni_set_decrypt_key:
+-L_aesni_set_decrypt_key_begin:
++__aesni_set_decrypt_key:
++L__aesni_set_decrypt_key_begin:
+ movl 4(%esp),%eax
+ movl 8(%esp),%ecx
+ movl 12(%esp),%edx
+- call __aesni_set_encrypt_key
++ call ___aesni_set_encrypt_key
+ movl 12(%esp),%edx
+ shll $4,%ecx
+ testl %eax,%eax
+diff --git a/lib/accelerated/x86/macosx/aesni-x86_64.s b/lib/accelerated/x86/macosx/aesni-x86_64.s
+index f0a5606..f66a101 100644
+--- a/lib/accelerated/x86/macosx/aesni-x86_64.s
++++ b/lib/accelerated/x86/macosx/aesni-x86_64.s
+@@ -39,10 +39,10 @@
+ #
+ .text
+
+-.globl _aesni_encrypt
++.globl __aesni_encrypt
+
+ .p2align 4
+-_aesni_encrypt:
++__aesni_encrypt:
+ movups (%rdi),%xmm2
+ movl 240(%rdx),%eax
+ movups (%rdx),%xmm0
+@@ -63,10 +63,10 @@ L$oop_enc1_1:
+ .byte 0xf3,0xc3
+
+
+-.globl _aesni_decrypt
++.globl __aesni_decrypt
+
+ .p2align 4
+-_aesni_decrypt:
++__aesni_decrypt:
+ movups (%rdi),%xmm2
+ movl 240(%rdx),%eax
+ movups (%rdx),%xmm0
+@@ -88,7 +88,7 @@ L$oop_dec1_2:
+
+
+ .p2align 4
+-_aesni_encrypt2:
++__aesni_encrypt2:
+ movups (%rcx),%xmm0
+ shll $4,%eax
+ movups 16(%rcx),%xmm1
+@@ -117,7 +117,7 @@ L$enc_loop2:
+
+
+ .p2align 4
+-_aesni_decrypt2:
++__aesni_decrypt2:
+ movups (%rcx),%xmm0
+ shll $4,%eax
+ movups 16(%rcx),%xmm1
+@@ -146,7 +146,7 @@ L$dec_loop2:
+
+
+ .p2align 4
+-_aesni_encrypt3:
++__aesni_encrypt3:
+ movups (%rcx),%xmm0
+ shll $4,%eax
+ movups 16(%rcx),%xmm1
+@@ -180,7 +180,7 @@ L$enc_loop3:
+
+
+ .p2align 4
+-_aesni_decrypt3:
++__aesni_decrypt3:
+ movups (%rcx),%xmm0
+ shll $4,%eax
+ movups 16(%rcx),%xmm1
+@@ -214,7 +214,7 @@ L$dec_loop3:
+
+
+ .p2align 4
+-_aesni_encrypt4:
++__aesni_encrypt4:
+ movups (%rcx),%xmm0
+ shll $4,%eax
+ movups 16(%rcx),%xmm1
+@@ -254,7 +254,7 @@ L$enc_loop4:
+
+
+ .p2align 4
+-_aesni_decrypt4:
++__aesni_decrypt4:
+ movups (%rcx),%xmm0
+ shll $4,%eax
+ movups 16(%rcx),%xmm1
+@@ -294,7 +294,7 @@ L$dec_loop4:
+
+
+ .p2align 4
+-_aesni_encrypt6:
++__aesni_encrypt6:
+ movups (%rcx),%xmm0
+ shll $4,%eax
+ movups 16(%rcx),%xmm1
+@@ -348,7 +348,7 @@ L$enc_loop6_enter:
+
+
+ .p2align 4
+-_aesni_decrypt6:
++__aesni_decrypt6:
+ movups (%rcx),%xmm0
+ shll $4,%eax
+ movups 16(%rcx),%xmm1
+@@ -402,7 +402,7 @@ L$dec_loop6_enter:
+
+
+ .p2align 4
+-_aesni_encrypt8:
++__aesni_encrypt8:
+ movups (%rcx),%xmm0
+ shll $4,%eax
+ movups 16(%rcx),%xmm1
+@@ -466,7 +466,7 @@ L$enc_loop8_enter:
+
+
+ .p2align 4
+-_aesni_decrypt8:
++__aesni_decrypt8:
+ movups (%rcx),%xmm0
+ shll $4,%eax
+ movups 16(%rcx),%xmm1
+@@ -528,10 +528,10 @@ L$dec_loop8_enter:
+ .byte 102,68,15,56,223,200
+ .byte 0xf3,0xc3
+
+-.globl _aesni_ecb_encrypt
++.globl __aesni_ecb_encrypt
+
+ .p2align 4
+-_aesni_ecb_encrypt:
++__aesni_ecb_encrypt:
+ andq $-16,%rdx
+ jz L$ecb_ret
+
+@@ -580,7 +580,7 @@ L$ecb_enc_loop8:
+ leaq 128(%rdi),%rdi
+ L$ecb_enc_loop8_enter:
+
+- call _aesni_encrypt8
++ call __aesni_encrypt8
+
+ subq $0x80,%rdx
+ jnc L$ecb_enc_loop8
+@@ -617,7 +617,7 @@ L$ecb_enc_tail:
+ je L$ecb_enc_six
+ movdqu 96(%rdi),%xmm8
+ xorps %xmm9,%xmm9
+- call _aesni_encrypt8
++ call __aesni_encrypt8
+ movups %xmm2,(%rsi)
+ movups %xmm3,16(%rsi)
+ movups %xmm4,32(%rsi)
+@@ -643,20 +643,20 @@ L$oop_enc1_3:
+ jmp L$ecb_ret
+ .p2align 4
+ L$ecb_enc_two:
+- call _aesni_encrypt2
++ call __aesni_encrypt2
+ movups %xmm2,(%rsi)
+ movups %xmm3,16(%rsi)
+ jmp L$ecb_ret
+ .p2align 4
+ L$ecb_enc_three:
+- call _aesni_encrypt3
++ call __aesni_encrypt3
+ movups %xmm2,(%rsi)
+ movups %xmm3,16(%rsi)
+ movups %xmm4,32(%rsi)
+ jmp L$ecb_ret
+ .p2align 4
+ L$ecb_enc_four:
+- call _aesni_encrypt4
++ call __aesni_encrypt4
+ movups %xmm2,(%rsi)
+ movups %xmm3,16(%rsi)
+ movups %xmm4,32(%rsi)
+@@ -665,7 +665,7 @@ L$ecb_enc_four:
+ .p2align 4
+ L$ecb_enc_five:
+ xorps %xmm7,%xmm7
+- call _aesni_encrypt6
++ call __aesni_encrypt6
+ movups %xmm2,(%rsi)
+ movups %xmm3,16(%rsi)
+ movups %xmm4,32(%rsi)
+@@ -674,7 +674,7 @@ L$ecb_enc_five:
+ jmp L$ecb_ret
+ .p2align 4
+ L$ecb_enc_six:
+- call _aesni_encrypt6
++ call __aesni_encrypt6
+ movups %xmm2,(%rsi)
+ movups %xmm3,16(%rsi)
+ movups %xmm4,32(%rsi)
+@@ -723,7 +723,7 @@ L$ecb_dec_loop8:
+ leaq 128(%rdi),%rdi
+ L$ecb_dec_loop8_enter:
+
+- call _aesni_decrypt8
++ call __aesni_decrypt8
+
+ movups (%r11),%xmm0
+ subq $0x80,%rdx
+@@ -770,7 +770,7 @@ L$ecb_dec_tail:
+ movups 96(%rdi),%xmm8
+ movups (%rcx),%xmm0
+ xorps %xmm9,%xmm9
+- call _aesni_decrypt8
++ call __aesni_decrypt8
+ movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
+ movups %xmm3,16(%rsi)
+@@ -805,7 +805,7 @@ L$oop_dec1_4:
+ jmp L$ecb_ret
+ .p2align 4
+ L$ecb_dec_two:
+- call _aesni_decrypt2
++ call __aesni_decrypt2
+ movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
+ movups %xmm3,16(%rsi)
+@@ -813,7 +813,7 @@ L$ecb_dec_two:
+ jmp L$ecb_ret
+ .p2align 4
+ L$ecb_dec_three:
+- call _aesni_decrypt3
++ call __aesni_decrypt3
+ movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
+ movups %xmm3,16(%rsi)
+@@ -823,7 +823,7 @@ L$ecb_dec_three:
+ jmp L$ecb_ret
+ .p2align 4
+ L$ecb_dec_four:
+- call _aesni_decrypt4
++ call __aesni_decrypt4
+ movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
+ movups %xmm3,16(%rsi)
+@@ -836,7 +836,7 @@ L$ecb_dec_four:
+ .p2align 4
+ L$ecb_dec_five:
+ xorps %xmm7,%xmm7
+- call _aesni_decrypt6
++ call __aesni_decrypt6
+ movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
+ movups %xmm3,16(%rsi)
+@@ -851,7 +851,7 @@ L$ecb_dec_five:
+ jmp L$ecb_ret
+ .p2align 4
+ L$ecb_dec_six:
+- call _aesni_decrypt6
++ call __aesni_decrypt6
+ movups %xmm2,(%rsi)
+ pxor %xmm2,%xmm2
+ movups %xmm3,16(%rsi)
+@@ -870,10 +870,10 @@ L$ecb_ret:
+ pxor %xmm1,%xmm1
+ .byte 0xf3,0xc3
+
+-.globl _aesni_ccm64_encrypt_blocks
++.globl __aesni_ccm64_encrypt_blocks
+
+ .p2align 4
+-_aesni_ccm64_encrypt_blocks:
++__aesni_ccm64_encrypt_blocks:
+ movl 240(%rcx),%eax
+ movdqu (%r8),%xmm6
+ movdqa L$increment64(%rip),%xmm9
+@@ -933,10 +933,10 @@ L$ccm64_enc2_loop:
+ pxor %xmm6,%xmm6
+ .byte 0xf3,0xc3
+
+-.globl _aesni_ccm64_decrypt_blocks
++.globl __aesni_ccm64_decrypt_blocks
+
+ .p2align 4
+-_aesni_ccm64_decrypt_blocks:
++__aesni_ccm64_decrypt_blocks:
+ movl 240(%rcx),%eax
+ movups (%r8),%xmm6
+ movdqu (%r9),%xmm3
+@@ -1030,10 +1030,10 @@ L$oop_enc1_6:
+ pxor %xmm6,%xmm6
+ .byte 0xf3,0xc3
+
+-.globl _aesni_ctr32_encrypt_blocks
++.globl __aesni_ctr32_encrypt_blocks
+
+ .p2align 4
+-_aesni_ctr32_encrypt_blocks:
++__aesni_ctr32_encrypt_blocks:
+ cmpq $1,%rdx
+ jne L$ctr32_bulk
+
+@@ -1603,10 +1603,10 @@ L$ctr32_done:
+ L$ctr32_epilogue:
+ .byte 0xf3,0xc3
+
+-.globl _aesni_xts_encrypt
++.globl __aesni_xts_encrypt
+
+ .p2align 4
+-_aesni_xts_encrypt:
++__aesni_xts_encrypt:
+ leaq (%rsp),%rax
+ pushq %rbp
+ subq $112,%rsp
+@@ -1899,7 +1899,7 @@ L$xts_enc_short:
+ pxor %xmm14,%xmm6
+ pxor %xmm7,%xmm7
+
+- call _aesni_encrypt6
++ call __aesni_encrypt6
+
+ xorps %xmm10,%xmm2
+ movdqa %xmm15,%xmm10
+@@ -1945,7 +1945,7 @@ L$xts_enc_two:
+ xorps %xmm10,%xmm2
+ xorps %xmm11,%xmm3
+
+- call _aesni_encrypt2
++ call __aesni_encrypt2
+
+ xorps %xmm10,%xmm2
+ movdqa %xmm12,%xmm10
+@@ -1965,7 +1965,7 @@ L$xts_enc_three:
+ xorps %xmm11,%xmm3
+ xorps %xmm12,%xmm4
+
+- call _aesni_encrypt3
++ call __aesni_encrypt3
+
+ xorps %xmm10,%xmm2
+ movdqa %xmm13,%xmm10
+@@ -1989,7 +1989,7 @@ L$xts_enc_four:
+ xorps %xmm12,%xmm4
+ xorps %xmm13,%xmm5
+
+- call _aesni_encrypt4
++ call __aesni_encrypt4
+
+ pxor %xmm10,%xmm2
+ movdqa %xmm14,%xmm10
+@@ -2068,10 +2068,10 @@ L$xts_enc_ret:
+ L$xts_enc_epilogue:
+ .byte 0xf3,0xc3
+
+-.globl _aesni_xts_decrypt
++.globl __aesni_xts_decrypt
+
+ .p2align 4
+-_aesni_xts_decrypt:
++__aesni_xts_decrypt:
+ leaq (%rsp),%rax
+ pushq %rbp
+ subq $112,%rsp
+@@ -2369,7 +2369,7 @@ L$xts_dec_short:
+ pxor %xmm13,%xmm5
+ pxor %xmm14,%xmm6
+
+- call _aesni_decrypt6
++ call __aesni_decrypt6
+
+ xorps %xmm10,%xmm2
+ xorps %xmm11,%xmm3
+@@ -2425,7 +2425,7 @@ L$xts_dec_two:
+ xorps %xmm10,%xmm2
+ xorps %xmm11,%xmm3
+
+- call _aesni_decrypt2
++ call __aesni_decrypt2
+
+ xorps %xmm10,%xmm2
+ movdqa %xmm12,%xmm10
+@@ -2446,7 +2446,7 @@ L$xts_dec_three:
+ xorps %xmm11,%xmm3
+ xorps %xmm12,%xmm4
+
+- call _aesni_decrypt3
++ call __aesni_decrypt3
+
+ xorps %xmm10,%xmm2
+ movdqa %xmm13,%xmm10
+@@ -2471,7 +2471,7 @@ L$xts_dec_four:
+ xorps %xmm12,%xmm4
+ xorps %xmm13,%xmm5
+
+- call _aesni_decrypt4
++ call __aesni_decrypt4
+
+ pxor %xmm10,%xmm2
+ movdqa %xmm14,%xmm10
+@@ -2570,10 +2570,10 @@ L$xts_dec_ret:
+ L$xts_dec_epilogue:
+ .byte 0xf3,0xc3
+
+-.globl _aesni_cbc_encrypt
++.globl __aesni_cbc_encrypt
+
+ .p2align 4
+-_aesni_cbc_encrypt:
++__aesni_cbc_encrypt:
+ testq %rdx,%rdx
+ jz L$cbc_ret
+
+@@ -2914,7 +2914,7 @@ L$cbc_dec_six_or_seven:
+ ja L$cbc_dec_seven
+
+ movaps %xmm7,%xmm8
+- call _aesni_decrypt6
++ call __aesni_decrypt6
+ pxor %xmm10,%xmm2
+ movaps %xmm8,%xmm10
+ pxor %xmm11,%xmm3
+@@ -2940,7 +2940,7 @@ L$cbc_dec_six_or_seven:
+ L$cbc_dec_seven:
+ movups 96(%rdi),%xmm8
+ xorps %xmm9,%xmm9
+- call _aesni_decrypt8
++ call __aesni_decrypt8
+ movups 80(%rdi),%xmm9
+ pxor %xmm10,%xmm2
+ movups 96(%rdi),%xmm10
+@@ -2986,7 +2986,7 @@ L$cbc_dec_loop6_enter:
+ leaq 96(%rdi),%rdi
+ movdqa %xmm7,%xmm8
+
+- call _aesni_decrypt6
++ call __aesni_decrypt6
+
+ pxor %xmm10,%xmm2
+ movdqa %xmm8,%xmm10
+@@ -3036,7 +3036,7 @@ L$cbc_dec_tail:
+ movaps %xmm5,%xmm14
+ movaps %xmm6,%xmm15
+ xorps %xmm7,%xmm7
+- call _aesni_decrypt6
++ call __aesni_decrypt6
+ pxor %xmm10,%xmm2
+ movaps %xmm15,%xmm10
+ pxor %xmm11,%xmm3
+@@ -3077,7 +3077,7 @@ L$oop_dec1_17:
+ .p2align 4
+ L$cbc_dec_two:
+ movaps %xmm3,%xmm12
+- call _aesni_decrypt2
++ call __aesni_decrypt2
+ pxor %xmm10,%xmm2
+ movaps %xmm12,%xmm10
+ pxor %xmm11,%xmm3
+@@ -3089,7 +3089,7 @@ L$cbc_dec_two:
+ .p2align 4
+ L$cbc_dec_three:
+ movaps %xmm4,%xmm13
+- call _aesni_decrypt3
++ call __aesni_decrypt3
+ pxor %xmm10,%xmm2
+ movaps %xmm13,%xmm10
+ pxor %xmm11,%xmm3
+@@ -3104,7 +3104,7 @@ L$cbc_dec_three:
+ .p2align 4
+ L$cbc_dec_four:
+ movaps %xmm5,%xmm14
+- call _aesni_decrypt4
++ call __aesni_decrypt4
+ pxor %xmm10,%xmm2
+ movaps %xmm14,%xmm10
+ pxor %xmm11,%xmm3
+@@ -3155,12 +3155,12 @@ L$cbc_dec_ret:
+ L$cbc_ret:
+ .byte 0xf3,0xc3
+
+-.globl _aesni_set_decrypt_key
++.globl __aesni_set_decrypt_key
+
+ .p2align 4
+-_aesni_set_decrypt_key:
++__aesni_set_decrypt_key:
+ .byte 0x48,0x83,0xEC,0x08
+- call __aesni_set_encrypt_key
++ call ___aesni_set_encrypt_key
+ shll $4,%esi
+ testl %eax,%eax
+ jnz L$dec_key_ret
+@@ -3195,11 +3195,11 @@ L$dec_key_ret:
+ .byte 0xf3,0xc3
+ L$SEH_end_set_decrypt_key:
+
+-.globl _aesni_set_encrypt_key
++.globl __aesni_set_encrypt_key
+
+ .p2align 4
+-_aesni_set_encrypt_key:
+ __aesni_set_encrypt_key:
++___aesni_set_encrypt_key:
+ .byte 0x48,0x83,0xEC,0x08
+ movq $-1,%rax
+ testq %rdi,%rdi
+diff --git a/lib/accelerated/x86/macosx/sha1-ssse3-x86.s b/lib/accelerated/x86/macosx/sha1-ssse3-x86.s
+index 8e01010..c1c3546 100644
+--- a/lib/accelerated/x86/macosx/sha1-ssse3-x86.s
++++ b/lib/accelerated/x86/macosx/sha1-ssse3-x86.s
+@@ -39,10 +39,10 @@
+ #
+ .file "sha1-586.s"
+ .text
+-.globl _sha1_block_data_order
++.globl __sha1_block_data_order
+ .align 4
+-_sha1_block_data_order:
+-L_sha1_block_data_order_begin:
++__sha1_block_data_order:
++L__sha1_block_data_order_begin:
+ pushl %ebp
+ pushl %ebx
+ pushl %esi
diff --git a/lib/accelerated/x86/macosx/sha1-ssse3-x86_64.s b/lib/accelerated/x86/macosx/sha1-ssse3-x86_64.s
-index 79c10de..5ec4ffe 100644
+index 79c10de..dc1e16d 100644
--- a/lib/accelerated/x86/macosx/sha1-ssse3-x86_64.s
+++ b/lib/accelerated/x86/macosx/sha1-ssse3-x86_64.s
+@@ -40,10 +40,10 @@
+ .text
+
+
+-.globl _sha1_block_data_order
++.globl __sha1_block_data_order
+
+ .p2align 4
+-_sha1_block_data_order:
++__sha1_block_data_order:
+ movl __gnutls_x86_cpuid_s+0(%rip),%r9d
+ movl __gnutls_x86_cpuid_s+4(%rip),%r8d
+ testl $512,%r8d
@@ -1329,7 +1329,7 @@ L$epilogue:
.p2align 4
-sha1_block_data_order_ssse3:
-+_sha1_block_data_order_ssse3:
++__sha1_block_data_order_ssse3:
_ssse3_shortcut:
pushq %rbx
pushq %rbp
+diff --git a/lib/accelerated/x86/macosx/sha256-ssse3-x86.s b/lib/accelerated/x86/macosx/sha256-ssse3-x86.s
+index 300212c..469ed47 100644
+--- a/lib/accelerated/x86/macosx/sha256-ssse3-x86.s
++++ b/lib/accelerated/x86/macosx/sha256-ssse3-x86.s
+@@ -39,10 +39,10 @@
+ #
+ .file "sha512-586.s"
+ .text
+-.globl _sha256_block_data_order
++.globl __sha256_block_data_order
+ .align 4
+-_sha256_block_data_order:
+-L_sha256_block_data_order_begin:
++__sha256_block_data_order:
++L__sha256_block_data_order_begin:
+ pushl %ebp
+ pushl %ebx
+ pushl %esi
diff --git a/lib/accelerated/x86/macosx/sha512-ssse3-x86_64.s b/lib/accelerated/x86/macosx/sha512-ssse3-x86_64.s
-index 7e73227..be79615 100644
+index 7e73227..5ff6af1 100644
--- a/lib/accelerated/x86/macosx/sha512-ssse3-x86_64.s
+++ b/lib/accelerated/x86/macosx/sha512-ssse3-x86_64.s
+@@ -40,10 +40,10 @@
+ .text
+
+
+-.globl _sha256_block_data_order
++.globl __sha256_block_data_order
+
+ .p2align 4
+-_sha256_block_data_order:
++__sha256_block_data_order:
+ leaq __gnutls_x86_cpuid_s(%rip),%r11
+ movl 0(%r11),%r9d
+ movl 4(%r11),%r10d
@@ -1801,7 +1801,7 @@ K256:
.byte 83,72,65,50,53,54,32,98,108,111,99,107,32,116,114,97,110,115,102,111,114,109,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.p2align 6
-sha256_block_data_order_ssse3:
-+_sha256_block_data_order_ssse3:
++__sha256_block_data_order_ssse3:
L$ssse3_shortcut:
pushq %rbx
pushq %rbp
@@ -1870,5 +2960,5 @@ index d73039e..ec7f7c0 100644
for (i=0;i<t2;i++)
MD_INCR(ctx);
--
-2.16.2
+2.14.3 (Apple Git-98)