
Comparing changes

This is a direct comparison between two commits made in this repository or its related repositories.

base repository: openzfs/zfs
base: 45930c9cfaf09b02d2386e259d020f2964658c70
head repository: openzfs/zfs
compare: ad118637058366c4127ee192f516c745cebe6716
Showing with 19 additions and 44 deletions.
  1. +19 −44 module/icp/asm-x86_64/modes/aesni-gcm-avx2.S
63 changes: 19 additions & 44 deletions module/icp/asm-x86_64/modes/aesni-gcm-avx2.S
@@ -10,12 +10,8 @@
/* Windows userland links with OpenSSL */
#if !defined (_WIN32) || defined (_KERNEL)

-#ifndef _CET_ENDBR
-#define _CET_ENDBR
-#endif
-
.section .rodata
-.align 16
+.balign 16


.Lbswap_mask:
@@ -35,7 +31,7 @@
.Lgfpoly_and_internal_carrybit:
.quad 1, 0xc200000000000001

-.align 32
+.balign 32

.Lctr_pattern:
.quad 0, 0
@@ -44,15 +40,10 @@
.quad 2, 0
.quad 2, 0

-.text
-.globl gcm_init_vpclmulqdq_avx2
-.hidden gcm_init_vpclmulqdq_avx2
-.type gcm_init_vpclmulqdq_avx2,@function
-.align 32
-gcm_init_vpclmulqdq_avx2:
+ENTRY_ALIGN(gcm_init_vpclmulqdq_avx2, 32)
.cfi_startproc

-_CET_ENDBR
+ENDBR



@@ -166,15 +157,11 @@ _CET_ENDBR
RET

.cfi_endproc
-.size gcm_init_vpclmulqdq_avx2, . - gcm_init_vpclmulqdq_avx2
-.globl gcm_gmult_vpclmulqdq_avx2
-.hidden gcm_gmult_vpclmulqdq_avx2
-.type gcm_gmult_vpclmulqdq_avx2,@function
-.align 32
-gcm_gmult_vpclmulqdq_avx2:
+SET_SIZE(gcm_init_vpclmulqdq_avx2)
+ENTRY_ALIGN(gcm_gmult_vpclmulqdq_avx2, 32)
.cfi_startproc

-_CET_ENDBR
+ENDBR



@@ -204,15 +191,11 @@ _CET_ENDBR
RET

.cfi_endproc
-.size gcm_gmult_vpclmulqdq_avx2, . - gcm_gmult_vpclmulqdq_avx2
-.globl gcm_ghash_vpclmulqdq_avx2
-.hidden gcm_ghash_vpclmulqdq_avx2
-.type gcm_ghash_vpclmulqdq_avx2,@function
-.align 32
-gcm_ghash_vpclmulqdq_avx2:
+SET_SIZE(gcm_gmult_vpclmulqdq_avx2)
+ENTRY_ALIGN(gcm_ghash_vpclmulqdq_avx2, 32)
.cfi_startproc

-_CET_ENDBR
+ENDBR



@@ -365,15 +348,11 @@ _CET_ENDBR
RET

.cfi_endproc
-.size gcm_ghash_vpclmulqdq_avx2, . - gcm_ghash_vpclmulqdq_avx2
-.globl aes_gcm_enc_update_vaes_avx2
-.hidden aes_gcm_enc_update_vaes_avx2
-.type aes_gcm_enc_update_vaes_avx2,@function
-.align 32
-aes_gcm_enc_update_vaes_avx2:
+SET_SIZE(gcm_ghash_vpclmulqdq_avx2)
+ENTRY_ALIGN(aes_gcm_enc_update_vaes_avx2, 32)
.cfi_startproc

-_CET_ENDBR
+ENDBR
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-16
@@ -462,7 +441,7 @@ _CET_ENDBR
addq $-128,%rdx
cmpq $127,%rdx
jbe .Lghash_last_ciphertext_4x__func1
-.align 16
+.balign 16
.Lcrypt_loop_4x__func1:


@@ -906,15 +885,11 @@ _CET_ENDBR
RET

.cfi_endproc
-.size aes_gcm_enc_update_vaes_avx2, . - aes_gcm_enc_update_vaes_avx2
-.globl aes_gcm_dec_update_vaes_avx2
-.hidden aes_gcm_dec_update_vaes_avx2
-.type aes_gcm_dec_update_vaes_avx2,@function
-.align 32
-aes_gcm_dec_update_vaes_avx2:
+SET_SIZE(aes_gcm_enc_update_vaes_avx2)
+ENTRY_ALIGN(aes_gcm_dec_update_vaes_avx2, 32)
.cfi_startproc

-_CET_ENDBR
+ENDBR
pushq %r12
.cfi_adjust_cfa_offset 8
.cfi_offset %r12,-16
@@ -951,7 +926,7 @@ _CET_ENDBR

vmovdqu 128(%r9),%ymm7
vmovdqu 128+32(%r9),%ymm8
-.align 16
+.balign 16
.Lcrypt_loop_4x__func2:


@@ -1328,7 +1303,7 @@ _CET_ENDBR
RET

.cfi_endproc
-.size aes_gcm_dec_update_vaes_avx2, . - aes_gcm_dec_update_vaes_avx2
+SET_SIZE(aes_gcm_dec_update_vaes_avx2)

#endif /* !_WIN32 || _KERNEL */
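
For readers unfamiliar with the OpenZFS assembly-linkage conventions on the new side of this diff: ENTRY_ALIGN, SET_SIZE, and ENDBR come from the project's asm_linkage.h machinery, replacing the open-coded .globl/.type/.align/.size directives and the local _CET_ENDBR shim. The sketch below is only an approximation of how such macros are conventionally defined, not the exact OpenZFS definitions; details such as symbol visibility (.hidden), mcount hooks, and the precise CET feature test are simplified or omitted here.

/*
 * Rough sketch of asm_linkage.h-style macros (illustrative only;
 * the real OpenZFS definitions may differ in these details).
 */
#define	ENTRY_ALIGN(x, a)	\
	.text;			\
	.balign	a;		\
	.globl	x;		\
	.type	x, @function;	\
x:

#define	SET_SIZE(x)		\
	.size	x, . - x

/* Emit an endbr64 landing pad only when CET/IBT support is compiled in. */
#if defined(__CET__)
#define	ENDBR	endbr64
#else
#define	ENDBR
#endif

The .align → .balign substitutions serve a related cleanup purpose: GAS interprets the operand of .align in a target-dependent way (a byte count on x86, but a power-of-two exponent on some other targets), whereas .balign always takes a byte count, so .balign 16 and .balign 32 are the unambiguous spellings of 16- and 32-byte alignment.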