From beefa4c067d05b7419fbe1f4d0ac9ee641d0bef1 Mon Sep 17 00:00:00 2001
From: Packit Service
Date: Dec 09 2020 19:00:51 +0000
Subject: Apply patch libgcrypt-1.8.5-intel-cet.patch

patch_name: libgcrypt-1.8.5-intel-cet.patch
present_in_specfile: true

---

diff --git a/cipher/camellia-aesni-avx-amd64.S b/cipher/camellia-aesni-avx-amd64.S
index 8022934..c4bd298 100644
--- a/cipher/camellia-aesni-avx-amd64.S
+++ b/cipher/camellia-aesni-avx-amd64.S
@@ -18,8 +18,9 @@
  * License along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#ifdef __x86_64
 #include <config.h>
+
+#ifdef __x86_64
 #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
      defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \
     defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX_SUPPORT)
diff --git a/cipher/camellia-aesni-avx2-amd64.S b/cipher/camellia-aesni-avx2-amd64.S
index 897e4ae..5fdf7e8 100644
--- a/cipher/camellia-aesni-avx2-amd64.S
+++ b/cipher/camellia-aesni-avx2-amd64.S
@@ -18,8 +18,9 @@
  * License along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#ifdef __x86_64
 #include <config.h>
+
+#ifdef __x86_64
 #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
      defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \
     defined(ENABLE_AESNI_SUPPORT) && defined(ENABLE_AVX2_SUPPORT)
diff --git a/cipher/chacha20-avx2-amd64.S b/cipher/chacha20-avx2-amd64.S
index 8c085ba..796aa38 100644
--- a/cipher/chacha20-avx2-amd64.S
+++ b/cipher/chacha20-avx2-amd64.S
@@ -48,6 +48,9 @@
 .globl _gcry_chacha20_amd64_avx2_blocks
 ELF(.type _gcry_chacha20_amd64_avx2_blocks,@function;)
 _gcry_chacha20_amd64_avx2_blocks:
+#ifdef _CET_ENDBR
+	_CET_ENDBR
+#endif
 .Lchacha_blocks_avx2_local:
 	vzeroupper
 	pushq %rbx
diff --git a/cipher/chacha20-sse2-amd64.S b/cipher/chacha20-sse2-amd64.S
index 2b9842c..cb7add8 100644
--- a/cipher/chacha20-sse2-amd64.S
+++ b/cipher/chacha20-sse2-amd64.S
@@ -41,6 +41,9 @@
 .globl _gcry_chacha20_amd64_sse2_blocks
 ELF(.type _gcry_chacha20_amd64_sse2_blocks,@function;)
 _gcry_chacha20_amd64_sse2_blocks:
+#ifdef _CET_ENDBR
+	_CET_ENDBR
+#endif
 .Lchacha_blocks_sse2_local:
 	pushq %rbx
 	pushq %rbp
diff --git a/cipher/poly1305-avx2-amd64.S b/cipher/poly1305-avx2-amd64.S
index 9362a5a..9dd886a 100644
--- a/cipher/poly1305-avx2-amd64.S
+++ b/cipher/poly1305-avx2-amd64.S
@@ -43,6 +43,9 @@
 .globl _gcry_poly1305_amd64_avx2_init_ext
 ELF(.type _gcry_poly1305_amd64_avx2_init_ext,@function;)
 _gcry_poly1305_amd64_avx2_init_ext:
+#ifdef _CET_ENDBR
+	_CET_ENDBR
+#endif
 .Lpoly1305_init_ext_avx2_local:
 	xor %edx, %edx
 	vzeroupper
@@ -406,6 +409,9 @@ ELF(.size _gcry_poly1305_amd64_avx2_init_ext,.-_gcry_poly1305_amd64_avx2_init_ex
 .globl _gcry_poly1305_amd64_avx2_blocks
 ELF(.type _gcry_poly1305_amd64_avx2_blocks,@function;)
 _gcry_poly1305_amd64_avx2_blocks:
+#ifdef _CET_ENDBR
+	_CET_ENDBR
+#endif
 .Lpoly1305_blocks_avx2_local:
 	vzeroupper
 	pushq %rbp
@@ -732,6 +738,9 @@ ELF(.size _gcry_poly1305_amd64_avx2_blocks,.-_gcry_poly1305_amd64_avx2_blocks;)
 .globl _gcry_poly1305_amd64_avx2_finish_ext
 ELF(.type _gcry_poly1305_amd64_avx2_finish_ext,@function;)
 _gcry_poly1305_amd64_avx2_finish_ext:
+#ifdef _CET_ENDBR
+	_CET_ENDBR
+#endif
 .Lpoly1305_finish_ext_avx2_local:
 	vzeroupper
 	pushq %rbp
diff --git a/cipher/poly1305-sse2-amd64.S b/cipher/poly1305-sse2-amd64.S
index 219eb07..41163c9 100644
--- a/cipher/poly1305-sse2-amd64.S
+++ b/cipher/poly1305-sse2-amd64.S
@@ -42,6 +42,9 @@
 .globl _gcry_poly1305_amd64_sse2_init_ext
 ELF(.type _gcry_poly1305_amd64_sse2_init_ext,@function;)
 _gcry_poly1305_amd64_sse2_init_ext:
+#ifdef _CET_ENDBR
+	_CET_ENDBR
+#endif
 .Lpoly1305_init_ext_x86_local:
 	xor %edx, %edx
 	pushq %r12
@@ -288,6 +291,9 @@ ELF(.size _gcry_poly1305_amd64_sse2_init_ext,.-_gcry_poly1305_amd64_sse2_init_ex
 .globl _gcry_poly1305_amd64_sse2_finish_ext
 ELF(.type _gcry_poly1305_amd64_sse2_finish_ext,@function;)
 _gcry_poly1305_amd64_sse2_finish_ext:
+#ifdef _CET_ENDBR
+	_CET_ENDBR
+#endif
 .Lpoly1305_finish_ext_x86_local:
 	pushq %rbp
 	movq %rsp, %rbp
@@ -439,6 +445,9 @@ ELF(.size _gcry_poly1305_amd64_sse2_finish_ext,.-_gcry_poly1305_amd64_sse2_finis
 .globl _gcry_poly1305_amd64_sse2_blocks
 ELF(.type _gcry_poly1305_amd64_sse2_blocks,@function;)
 _gcry_poly1305_amd64_sse2_blocks:
+#ifdef _CET_ENDBR
+	_CET_ENDBR
+#endif
 .Lpoly1305_blocks_x86_local:
 	pushq %rbp
 	movq %rsp, %rbp
diff --git a/cipher/serpent-avx2-amd64.S b/cipher/serpent-avx2-amd64.S
index 8d60a15..8c132c6 100644
--- a/cipher/serpent-avx2-amd64.S
+++ b/cipher/serpent-avx2-amd64.S
@@ -18,8 +18,9 @@
  * License along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
 
-#ifdef __x86_64
 #include <config.h>
+
+#ifdef __x86_64
 #if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
      defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && defined(USE_SERPENT) && \
     defined(ENABLE_AVX2_SUPPORT)
diff --git a/configure.ac b/configure.ac
index 97ab209..b7fb62c 100644
--- a/configure.ac
+++ b/configure.ac
@@ -95,6 +95,12 @@ AH_TOP([
 AH_BOTTOM([
 #define _GCRYPT_IN_LIBGCRYPT 1
 
+/* Add .note.gnu.property section for Intel CET in assembler sources
+   when CET is enabled. */
+#if defined(__ASSEMBLER__) && defined(__CET__)
+# include <cet.h>
+#endif
+
 /* If the configure check for endianness has been disabled, get it from
    OS macros.  This is intended for making fat binary builds on OS X.  */
 #ifdef DISABLED_ENDIAN_CHECK
diff --git a/mpi/config.links b/mpi/config.links
index 3ead4f0..4f43b73 100644
--- a/mpi/config.links
+++ b/mpi/config.links
@@ -382,6 +382,16 @@ if test x"$mpi_cpu_arch" = x ; then
     mpi_cpu_arch="unknown"
 fi
 
+# Add .note.gnu.property section for Intel CET in assembler sources
+# when CET is enabled.
+if test x"$mpi_cpu_arch" = xx86 ; then
+  cat <<EOF >> ./mpi/asm-syntax.h
+
+#if defined(__ASSEMBLER__) && defined(__CET__)
+# include <cet.h>
+#endif
+EOF
+fi
 
 # Make sysdep.h
 echo '/* created by config.links - do not edit */' >./mpi/sysdep.h
diff --git a/mpi/i386/mpih-add1.S b/mpi/i386/mpih-add1.S
index 652b232..daf5086 100644
--- a/mpi/i386/mpih-add1.S
+++ b/mpi/i386/mpih-add1.S
@@ -52,6 +52,10 @@ C_SYMBOL_NAME(_gcry_mpih_add_n:)
 	movl 20(%esp),%edx		/* s2_ptr */
 	movl 24(%esp),%ecx		/* size */
 
+#if defined __CET__ && (__CET__ & 1) != 0
+	pushl	%ebx
+#endif
+
 	movl	%ecx,%eax
 	shrl	$3,%ecx			/* compute count for unrolled loop */
 	negl	%eax
@@ -63,6 +67,9 @@ C_SYMBOL_NAME(_gcry_mpih_add_n:)
 	subl	%eax,%esi		/* ... by a constant when we ... */
 	subl	%eax,%edx		/* ... enter the loop */
 	shrl	$2,%eax			/* restore previous value */
+#if defined __CET__ && (__CET__ & 1) != 0
+	leal	-4(,%eax,4),%ebx	/* Count for 4-byte endbr32 */
+#endif
 #ifdef PIC
 /* Calculate start address in loop for PIC.  Due to limitations in some
    assemblers, Loop-L0-3 cannot be put into the leal */
@@ -75,29 +82,53 @@ L0:	leal	(%eax,%eax,8),%eax
 /* Calculate start address in loop for non-PIC.	 */
 	leal	(Loop - 3)(%eax,%eax,8),%eax
 #endif
+#if defined __CET__ && (__CET__ & 1) != 0
+	addl	%ebx,%eax		/* Adjust for endbr32 */
+#endif
 	jmp	*%eax			/* jump into loop */
 	ALIGN (3)
 Loop:	movl	(%esi),%eax
 	adcl	(%edx),%eax
 	movl	%eax,(%edi)
+#ifdef _CET_ENDBR
+	_CET_ENDBR
+#endif
 	movl	4(%esi),%eax
 	adcl	4(%edx),%eax
 	movl	%eax,4(%edi)
+#ifdef _CET_ENDBR
+	_CET_ENDBR
+#endif
 	movl	8(%esi),%eax
 	adcl	8(%edx),%eax
 	movl	%eax,8(%edi)
+#ifdef _CET_ENDBR
+	_CET_ENDBR
+#endif
 	movl	12(%esi),%eax
 	adcl	12(%edx),%eax
 	movl	%eax,12(%edi)
+#ifdef _CET_ENDBR
+	_CET_ENDBR
+#endif
 	movl	16(%esi),%eax
 	adcl	16(%edx),%eax
 	movl	%eax,16(%edi)
+#ifdef _CET_ENDBR
+	_CET_ENDBR
+#endif
 	movl	20(%esi),%eax
 	adcl	20(%edx),%eax
 	movl	%eax,20(%edi)
+#ifdef _CET_ENDBR
+	_CET_ENDBR
+#endif
 	movl	24(%esi),%eax
 	adcl	24(%edx),%eax
 	movl	%eax,24(%edi)
+#ifdef _CET_ENDBR
+	_CET_ENDBR
+#endif
 	movl	28(%esi),%eax
 	adcl	28(%edx),%eax
 	movl	%eax,28(%edi)
@@ -110,6 +141,10 @@ Loop:	movl	(%esi),%eax
 	sbbl	%eax,%eax
 	negl	%eax
 
+#if defined __CET__ && (__CET__ & 1) != 0
+	popl	%ebx
+#endif
+
 	popl %esi
 	popl %edi
 	ret
diff --git a/mpi/i386/mpih-sub1.S b/mpi/i386/mpih-sub1.S
index f447f7a..e58fd96 100644
--- a/mpi/i386/mpih-sub1.S
+++ b/mpi/i386/mpih-sub1.S
@@ -53,6 +53,10 @@ C_SYMBOL_NAME(_gcry_mpih_sub_n:)
 	movl 20(%esp),%edx		/* s2_ptr */
 	movl 24(%esp),%ecx		/* size */
 
+#if defined __CET__ && (__CET__ & 1) != 0
+	pushl	%ebx
+#endif
+
 	movl	%ecx,%eax
 	shrl	$3,%ecx			/* compute count for unrolled loop */
 	negl	%eax
@@ -64,6 +68,9 @@ C_SYMBOL_NAME(_gcry_mpih_sub_n:)
 	subl	%eax,%esi		/* ... by a constant when we ... */
 	subl	%eax,%edx		/* ... enter the loop */
 	shrl	$2,%eax			/* restore previous value */
+#if defined __CET__ && (__CET__ & 1) != 0
+	leal	-4(,%eax,4),%ebx	/* Count for 4-byte endbr32 */
+#endif
 #ifdef PIC
 /* Calculate start address in loop for PIC.  Due to limitations in some
    assemblers, Loop-L0-3 cannot be put into the leal */
@@ -76,29 +83,53 @@ L0:	leal	(%eax,%eax,8),%eax
 /* Calculate start address in loop for non-PIC.	 */
 	leal	(Loop - 3)(%eax,%eax,8),%eax
 #endif
+#if defined __CET__ && (__CET__ & 1) != 0
+	addl	%ebx,%eax		/* Adjust for endbr32 */
+#endif
 	jmp	*%eax			/* jump into loop */
 	ALIGN (3)
 Loop:	movl	(%esi),%eax
 	sbbl	(%edx),%eax
 	movl	%eax,(%edi)
+#ifdef _CET_ENDBR
+	_CET_ENDBR
+#endif
 	movl	4(%esi),%eax
 	sbbl	4(%edx),%eax
 	movl	%eax,4(%edi)
+#ifdef _CET_ENDBR
+	_CET_ENDBR
+#endif
 	movl	8(%esi),%eax
 	sbbl	8(%edx),%eax
 	movl	%eax,8(%edi)
+#ifdef _CET_ENDBR
+	_CET_ENDBR
+#endif
 	movl	12(%esi),%eax
 	sbbl	12(%edx),%eax
 	movl	%eax,12(%edi)
+#ifdef _CET_ENDBR
+	_CET_ENDBR
+#endif
 	movl	16(%esi),%eax
 	sbbl	16(%edx),%eax
 	movl	%eax,16(%edi)
+#ifdef _CET_ENDBR
+	_CET_ENDBR
+#endif
 	movl	20(%esi),%eax
 	sbbl	20(%edx),%eax
 	movl	%eax,20(%edi)
+#ifdef _CET_ENDBR
+	_CET_ENDBR
+#endif
 	movl	24(%esi),%eax
 	sbbl	24(%edx),%eax
 	movl	%eax,24(%edi)
+#ifdef _CET_ENDBR
+	_CET_ENDBR
+#endif
 	movl	28(%esi),%eax
 	sbbl	28(%edx),%eax
 	movl	%eax,28(%edi)
@@ -111,6 +142,10 @@ Loop:	movl	(%esi),%eax
 	sbbl	%eax,%eax
 	negl	%eax
 
+#if defined __CET__ && (__CET__ & 1) != 0
+	popl	%ebx
+#endif
+
 	popl %esi
 	popl %edi
 	ret
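
One pattern runs through the whole patch: configure.ac (for the cipher/ sources, via config.h) and mpi/config.links (for the mpi/ sources, via asm-syntax.h) arrange for GCC's <cet.h> to be pulled into every assembler file. Including <cet.h> emits the .note.gnu.property section that marks the object as CET-compatible and defines the _CET_ENDBR macro, which each function entry point then uses behind an #ifdef so that non-CET builds are unaffected. A minimal standalone sketch of that pattern, assuming GCC's <cet.h> and a build with -fcf-protection; the file and symbol names (example.S, example_fn) are hypothetical and not part of the patch:

	/* example.S - hypothetical illustration of the CET marker pattern.
	   Assemble with: gcc -fcf-protection -c example.S */
	#if defined(__ASSEMBLER__) && defined(__CET__)
	# include <cet.h>	/* defines _CET_ENDBR, emits .note.gnu.property */
	#endif

		.text
		.globl	example_fn
		.type	example_fn,@function
	example_fn:
	#ifdef _CET_ENDBR
		_CET_ENDBR	/* endbr64 (endbr32 on i386): a legal landing
				   pad for indirect calls and jumps */
	#endif
		ret
		.size	example_fn,.-example_fn

The mpi/i386 files need more than the entry-point marker because _gcry_mpih_add_n and _gcry_mpih_sub_n enter their eight-way unrolled loop through a computed "jmp *%eax": when indirect-branch tracking is enabled (__CET__ & 1), each jump target inside the loop grows by a 4-byte endbr32, so the computed entry address is rebiased by the added leal/addl pair, with %ebx saved and restored around its use as scratch.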