From: Dave Watson <davejwatson@fb.com>
To: Herbert Xu <herbert@gondor.apana.org.au>,
	Junaid Shahid <junaids@google.com>,
	Steffen Klassert <steffen.klassert@secunet.com>,
	"linux-crypto@vger.kernel.org" <linux-crypto@vger.kernel.org>
Cc: Doron Roberts-Kedes <doronrk@fb.com>,
	Sabrina Dubroca <sd@queasysnail.net>,
	"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
	Stephan Mueller <smueller@chronox.de>
Subject: [PATCH 03/12] x86/crypto: aesni: Macro-ify func save/restore
Date: Mon, 10 Dec 2018 19:57:12 +0000	[thread overview]
Message-ID: <08b21e5705b45fe799463d4fa708c7b73fa24625.1544471415.git.davejwatson@fb.com> (raw)
In-Reply-To: <cover.1544471415.git.davejwatson@fb.com>

Macro-ify function save and restore.  These macros will be used by the
new scatter/gather update functions added later in this series.

Signed-off-by: Dave Watson <davejwatson@fb.com>
---
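For reviewers, a minimal sketch of the resulting call-site pattern
(illustrative only, not part of the applied diff): each AVX GCM entry
point now brackets its body with the shared macros instead of the
open-coded push/pop and stack-alignment sequences, e.g.

        ENTRY(aesni_gcm_enc_avx_gen2)
                FUNC_SAVE       # push r12-r15, save rsp in r14, align rsp to 64 bytes
                GCM_ENC_DEC INITIAL_BLOCKS_AVX GHASH_8_ENCRYPT_8_PARALLEL_AVX GHASH_LAST_8_AVX GHASH_MUL_AVX ENC
                FUNC_RESTORE    # restore rsp from r14, pop r15-r12
                ret
        ENDPROC(aesni_gcm_enc_avx_gen2)
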
 arch/x86/crypto/aesni-intel_avx-x86_64.S | 94 +++++++++---------------
 1 file changed, 36 insertions(+), 58 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
index 284f1b8b88fc..dd895f69399b 100644
--- a/arch/x86/crypto/aesni-intel_avx-x86_64.S
+++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
@@ -247,6 +247,30 @@ VARIABLE_OFFSET = 16*8
 # Utility Macros
 ################################
 
+.macro FUNC_SAVE
+        #the number of pushes must equal STACK_OFFSET
+        push    %r12
+        push    %r13
+        push    %r14
+        push    %r15
+
+        mov     %rsp, %r14
+
+
+
+        sub     $VARIABLE_OFFSET, %rsp
+        and     $~63, %rsp                    # align rsp to 64 bytes
+.endm
+
+.macro FUNC_RESTORE
+        mov     %r14, %rsp
+
+        pop     %r15
+        pop     %r14
+        pop     %r13
+        pop     %r12
+.endm
+
 # Encryption of a single block
 .macro ENCRYPT_SINGLE_BLOCK XMM0
                 vpxor    (arg1), \XMM0, \XMM0
@@ -264,22 +288,6 @@ VARIABLE_OFFSET = 16*8
 # clobbering all xmm registers
 # clobbering r10, r11, r12, r13, r14, r15
 .macro  GCM_ENC_DEC INITIAL_BLOCKS GHASH_8_ENCRYPT_8_PARALLEL GHASH_LAST_8 GHASH_MUL ENC_DEC
-
-        #the number of pushes must equal STACK_OFFSET
-        push    %r12
-        push    %r13
-        push    %r14
-        push    %r15
-
-        mov     %rsp, %r14
-
-
-
-
-        sub     $VARIABLE_OFFSET, %rsp
-        and     $~63, %rsp                  # align rsp to 64 bytes
-
-
         vmovdqu  HashKey(arg2), %xmm13      # xmm13 = HashKey
 
         mov     arg5, %r13                  # save the number of bytes of plaintext/ciphertext
@@ -566,12 +574,6 @@ _T_16\@:
         vmovdqu %xmm9, (%r10)
 
 _return_T_done\@:
-        mov     %r14, %rsp
-
-        pop     %r15
-        pop     %r14
-        pop     %r13
-        pop     %r12
 .endm
 
 #ifdef CONFIG_AS_AVX
@@ -1511,18 +1513,7 @@ _initial_blocks_done\@:
 #        u8     *hash_subkey)# /* H, the Hash sub key input. Data starts on a 16-byte boundary. */
 #############################################################
 ENTRY(aesni_gcm_precomp_avx_gen2)
-        #the number of pushes must equal STACK_OFFSET
-        push    %r12
-        push    %r13
-        push    %r14
-        push    %r15
-
-        mov     %rsp, %r14
-
-
-
-        sub     $VARIABLE_OFFSET, %rsp
-        and     $~63, %rsp                  # align rsp to 64 bytes
+        FUNC_SAVE
 
         vmovdqu  (arg3), %xmm6              # xmm6 = HashKey
 
@@ -1546,12 +1537,7 @@ ENTRY(aesni_gcm_precomp_avx_gen2)
 
         PRECOMPUTE_AVX  %xmm6, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5
 
-        mov     %r14, %rsp
-
-        pop     %r15
-        pop     %r14
-        pop     %r13
-        pop     %r12
+        FUNC_RESTORE
         ret
 ENDPROC(aesni_gcm_precomp_avx_gen2)
 
@@ -1573,7 +1559,9 @@ ENDPROC(aesni_gcm_precomp_avx_gen2)
 #				Valid values are 16 (most likely), 12 or 8. */
 ###############################################################################
 ENTRY(aesni_gcm_enc_avx_gen2)
+        FUNC_SAVE
         GCM_ENC_DEC INITIAL_BLOCKS_AVX GHASH_8_ENCRYPT_8_PARALLEL_AVX GHASH_LAST_8_AVX GHASH_MUL_AVX ENC
+        FUNC_RESTORE
 	ret
 ENDPROC(aesni_gcm_enc_avx_gen2)
 
@@ -1595,7 +1583,9 @@ ENDPROC(aesni_gcm_enc_avx_gen2)
 #				Valid values are 16 (most likely), 12 or 8. */
 ###############################################################################
 ENTRY(aesni_gcm_dec_avx_gen2)
+        FUNC_SAVE
         GCM_ENC_DEC INITIAL_BLOCKS_AVX GHASH_8_ENCRYPT_8_PARALLEL_AVX GHASH_LAST_8_AVX GHASH_MUL_AVX DEC
+        FUNC_RESTORE
 	ret
 ENDPROC(aesni_gcm_dec_avx_gen2)
 #endif /* CONFIG_AS_AVX */
@@ -2525,18 +2515,7 @@ _initial_blocks_done\@:
 #				Data starts on a 16-byte boundary. */
 #############################################################
 ENTRY(aesni_gcm_precomp_avx_gen4)
-        #the number of pushes must equal STACK_OFFSET
-        push    %r12
-        push    %r13
-        push    %r14
-        push    %r15
-
-        mov     %rsp, %r14
-
-
-
-        sub     $VARIABLE_OFFSET, %rsp
-        and     $~63, %rsp                    # align rsp to 64 bytes
+        FUNC_SAVE
 
         vmovdqu  (arg3), %xmm6                # xmm6 = HashKey
 
@@ -2560,12 +2539,7 @@ ENTRY(aesni_gcm_precomp_avx_gen4)
 
         PRECOMPUTE_AVX2  %xmm6, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5
 
-        mov     %r14, %rsp
-
-        pop     %r15
-        pop     %r14
-        pop     %r13
-        pop     %r12
+        FUNC_RESTORE
         ret
 ENDPROC(aesni_gcm_precomp_avx_gen4)
 
@@ -2588,7 +2562,9 @@ ENDPROC(aesni_gcm_precomp_avx_gen4)
 #				Valid values are 16 (most likely), 12 or 8. */
 ###############################################################################
 ENTRY(aesni_gcm_enc_avx_gen4)
+        FUNC_SAVE
         GCM_ENC_DEC INITIAL_BLOCKS_AVX2 GHASH_8_ENCRYPT_8_PARALLEL_AVX2 GHASH_LAST_8_AVX2 GHASH_MUL_AVX2 ENC
+        FUNC_RESTORE
 	ret
 ENDPROC(aesni_gcm_enc_avx_gen4)
 
@@ -2610,7 +2586,9 @@ ENDPROC(aesni_gcm_enc_avx_gen4)
 #				Valid values are 16 (most likely), 12 or 8. */
 ###############################################################################
 ENTRY(aesni_gcm_dec_avx_gen4)
+        FUNC_SAVE
         GCM_ENC_DEC INITIAL_BLOCKS_AVX2 GHASH_8_ENCRYPT_8_PARALLEL_AVX2 GHASH_LAST_8_AVX2 GHASH_MUL_AVX2 DEC
+        FUNC_RESTORE
 	ret
 ENDPROC(aesni_gcm_dec_avx_gen4)
 
-- 
2.17.1

