linux-kernel.vger.kernel.org archive mirror
From: Peter Zijlstra <peterz@infradead.org>
To: "Chang S. Bae" <chang.seok.bae@intel.com>
Cc: tglx@linutronix.de, mingo@kernel.org, bp@suse.de,
	luto@kernel.org, x86@kernel.org, herbert@gondor.apana.org.au,
	dan.j.williams@intel.com, dave.hansen@intel.com,
	ravi.v.shankar@intel.com, ning.sun@intel.com,
	kumar.n.dwarakanath@intel.com, linux-crypto@vger.kernel.org,
	linux-kernel@vger.kernel.org
Subject: Re: [RFC PATCH 7/8] crypto: x86/aes-kl - Support AES algorithm using Key Locker instructions
Date: Fri, 18 Dec 2020 11:11:48 +0100
Message-ID: <20201218101148.GF3021@hirez.programming.kicks-ass.net>
In-Reply-To: <20201216174146.10446-8-chang.seok.bae@intel.com>

On Wed, Dec 16, 2020 at 09:41:45AM -0800, Chang S. Bae wrote:
> diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h
> index bd7f02480ca1..b719a11a2905 100644
> --- a/arch/x86/include/asm/inst.h
> +++ b/arch/x86/include/asm/inst.h
> @@ -122,9 +122,62 @@
>  #endif
>  	.endm
>  
> +	.macro XMM_NUM opd xmm
> +	\opd = REG_NUM_INVALID
> +	.ifc \xmm,%xmm0
> +	\opd = 0
> +	.endif
> +	.ifc \xmm,%xmm1
> +	\opd = 1
> +	.endif
> +	.ifc \xmm,%xmm2
> +	\opd = 2
> +	.endif
> +	.ifc \xmm,%xmm3
> +	\opd = 3
> +	.endif
> +	.ifc \xmm,%xmm4
> +	\opd = 4
> +	.endif
> +	.ifc \xmm,%xmm5
> +	\opd = 5
> +	.endif
> +	.ifc \xmm,%xmm6
> +	\opd = 6
> +	.endif
> +	.ifc \xmm,%xmm7
> +	\opd = 7
> +	.endif
> +	.ifc \xmm,%xmm8
> +	\opd = 8
> +	.endif
> +	.ifc \xmm,%xmm9
> +	\opd = 9
> +	.endif
> +	.ifc \xmm,%xmm10
> +	\opd = 10
> +	.endif
> +	.ifc \xmm,%xmm11
> +	\opd = 11
> +	.endif
> +	.ifc \xmm,%xmm12
> +	\opd = 12
> +	.endif
> +	.ifc \xmm,%xmm13
> +	\opd = 13
> +	.endif
> +	.ifc \xmm,%xmm14
> +	\opd = 14
> +	.endif
> +	.ifc \xmm,%xmm15
> +	\opd = 15
> +	.endif
> +	.endm
> +
>  	.macro REG_TYPE type reg
>  	R32_NUM reg_type_r32 \reg
>  	R64_NUM reg_type_r64 \reg
> +	XMM_NUM reg_type_xmm \reg
>  	.if reg_type_r64 <> REG_NUM_INVALID
>  	\type = REG_TYPE_R64
>  	.elseif reg_type_r32 <> REG_NUM_INVALID
> @@ -134,6 +187,14 @@
>  	.endif
>  	.endm
>  
> +	.macro PFX_OPD_SIZE
> +	.byte 0x66
> +	.endm
> +
> +	.macro PFX_RPT
> +	.byte 0xf3
> +	.endm
> +
>  	.macro PFX_REX opd1 opd2 W=0
>  	.if ((\opd1 | \opd2) & 8) || \W
>  	.byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
> @@ -158,6 +219,146 @@
>  	.byte 0x0f, 0xc7
>  	MODRM 0xc0 rdpid_opd 0x7
>  .endm
> +
> +	.macro ENCODEKEY128 reg1 reg2
> +	R32_NUM encodekey128_opd1 \reg1
> +	R32_NUM encodekey128_opd2 \reg2
> +	PFX_RPT
> +	.byte 0x0f, 0x38, 0xfa
> +	MODRM 0xc0 encodekey128_opd2 encodekey128_opd1
> +	.endm
> +
> +	.macro ENCODEKEY256 reg1 reg2
> +	R32_NUM encodekey256_opd1 \reg1
> +	R32_NUM encodekey256_opd2 \reg2
> +	PFX_RPT
> +	.byte 0x0f, 0x38, 0xfb
> +	MODRM 0xc0 encodekey256_opd2 encodekey256_opd1
> +	.endm
> +
> +	.macro AESENC128KL reg, xmm
> +	REG_TYPE aesenc128kl_opd1_type \reg
> +	.if aesenc128kl_opd1_type == REG_TYPE_R64
> +	R64_NUM aesenc128kl_opd1 \reg
> +	.elseif aesenc128kl_opd1_type == REG_TYPE_R32
> +	R32_NUM aesenc128kl_opd1 \reg
> +	.else
> +	aesenc128kl_opd1 = REG_NUM_INVALID
> +	.endif
> +	XMM_NUM aesenc128kl_opd2 \xmm
> +	PFX_RPT
> +	.byte 0x0f, 0x38, 0xdc
> +	MODRM 0x0 aesenc128kl_opd1 aesenc128kl_opd2
> +	.endm
> +
> +	.macro AESDEC128KL reg, xmm
> +	REG_TYPE aesdec128kl_opd1_type \reg
> +	.if aesdec128kl_opd1_type == REG_TYPE_R64
> +	R64_NUM aesdec128kl_opd1 \reg
> +	.elseif aesdec128kl_opd1_type == REG_TYPE_R32
> +	R32_NUM aesdec128kl_opd1 \reg
> +	.else
> +	aesdec128kl_opd1 = REG_NUM_INVALID
> +	.endif
> +	XMM_NUM aesdec128kl_opd2 \xmm
> +	PFX_RPT
> +	.byte 0x0f, 0x38, 0xdd
> +	MODRM 0x0 aesdec128kl_opd1 aesdec128kl_opd2
> +	.endm
> +
> +	.macro AESENC256KL reg, xmm
> +	REG_TYPE aesenc256kl_opd1_type \reg
> +	.if aesenc256kl_opd1_type == REG_TYPE_R64
> +	R64_NUM aesenc256kl_opd1 \reg
> +	.elseif aesenc256kl_opd1_type == REG_TYPE_R32
> +	R32_NUM aesenc256kl_opd1 \reg
> +	.else
> +	aesenc256kl_opd1 = REG_NUM_INVALID
> +	.endif
> +	XMM_NUM aesenc256kl_opd2 \xmm
> +	PFX_RPT
> +	.byte 0x0f, 0x38, 0xde
> +	MODRM 0x0 aesenc256kl_opd1 aesenc256kl_opd2
> +	.endm
> +
> +	.macro AESDEC256KL reg, xmm
> +	REG_TYPE aesdec256kl_opd1_type \reg
> +	.if aesdec256kl_opd1_type == REG_TYPE_R64
> +	R64_NUM aesdec256kl_opd1 \reg
> +	.elseif aesdec256kl_opd1_type == REG_TYPE_R32
> +	R32_NUM aesdec256kl_opd1 \reg
> +	.else
> +	aesdec256kl_opd1 = REG_NUM_INVALID
> +	.endif
> +	XMM_NUM aesdec256kl_opd2 \xmm
> +	PFX_RPT
> +	.byte 0x0f, 0x38, 0xdf
> +	MODRM 0x0 aesdec256kl_opd1 aesdec256kl_opd2
> +	.endm
> +
> +	.macro AESENCWIDE128KL reg
> +	REG_TYPE aesencwide128kl_opd1_type \reg
> +	.if aesencwide128kl_opd1_type == REG_TYPE_R64
> +	R64_NUM aesencwide128kl_opd1 \reg
> +	.elseif aesencwide128kl_opd1_type == REG_TYPE_R32
> +	R32_NUM aesencwide128kl_opd1 \reg
> +	.else
> +	aesencwide128kl_opd1 = REG_NUM_INVALID
> +	.endif
> +	PFX_RPT
> +	.byte 0x0f, 0x38, 0xd8
> +	MODRM 0x0 aesencwide128kl_opd1 0x0
> +	.endm
> +
> +	.macro AESDECWIDE128KL reg
> +	REG_TYPE aesdecwide128kl_opd1_type \reg
> +	.if aesdecwide128kl_opd1_type == REG_TYPE_R64
> +	R64_NUM aesdecwide128kl_opd1 \reg
> +	.elseif aesdecwide128kl_opd1_type == REG_TYPE_R32
> +	R32_NUM aesdecwide128kl_opd1 \reg
> +	.else
> +	aesdecwide128kl_opd1 = REG_NUM_INVALID
> +	.endif
> +	PFX_RPT
> +	.byte 0x0f, 0x38, 0xd8
> +	MODRM 0x0 aesdecwide128kl_opd1 0x1
> +	.endm
> +
> +	.macro AESENCWIDE256KL reg
> +	REG_TYPE aesencwide256kl_opd1_type \reg
> +	.if aesencwide256kl_opd1_type == REG_TYPE_R64
> +	R64_NUM aesencwide256kl_opd1 \reg
> +	.elseif aesencwide256kl_opd1_type == REG_TYPE_R32
> +	R32_NUM aesencwide256kl_opd1 \reg
> +	.else
> +	aesencwide256kl_opd1 = REG_NUM_INVALID
> +	.endif
> +	PFX_RPT
> +	.byte 0x0f, 0x38, 0xd8
> +	MODRM 0x0 aesencwide256kl_opd1 0x2
> +	.endm
> +
> +	.macro AESDECWIDE256KL reg
> +	REG_TYPE aesdecwide256kl_opd1_type \reg
> +	.if aesdecwide256kl_opd1_type == REG_TYPE_R64
> +	R64_NUM aesdecwide256kl_opd1 \reg
> +	.elseif aesdecwide256kl_opd1_type == REG_TYPE_R32
> +	R32_NUM aesdecwide256kl_opd1 \reg
> +	.else
> +	aesdecwide256kl_opd1 = REG_NUM_INVALID
> +	.endif
> +	PFX_RPT
> +	.byte 0x0f, 0x38, 0xd8
> +	MODRM 0x0 aesdecwide256kl_opd1 0x3
> +	.endm
> +
> +	.macro LOADIWKEY xmm1, xmm2
> +	XMM_NUM loadiwkey_opd1 \xmm1
> +	XMM_NUM loadiwkey_opd2 \xmm2
> +	PFX_RPT
> +	.byte 0x0f, 0x38, 0xdc
> +	MODRM 0xc0 loadiwkey_opd2 loadiwkey_opd1
> +	.endm
>  #endif
>  
>  #endif
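
(For reference, a sketch of what the macro layer above boils down to at the byte level; the register choices are illustrative, not taken from the patch. With mod=0 the GPR operand is used as a memory address:)

	AESENC128KL %rdi, %xmm0
	# emits: 0xf3, 0x0f, 0x38, 0xdc, 0x07  (PFX_RPT, opcode, ModRM)
	# i.e. the same encoding as the plain mnemonic: aesenc128kl (%rdi), %xmm0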

*groan*, so what actual version of binutils is needed and why is this
driver important enough to build on ancient crud to warrant all this
gunk?
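
(A sketch of how this could be avoided altogether, assuming the toolchain requirement is acceptable: binutils 2.36 is, as far as I can tell, the first release whose assembler accepts the Key Locker mnemonics, so treat the exact version as an assumption. With a new enough assembler the raw-byte macros collapse to plain instructions, and the fallback could be gated behind an $(as-instr ...) test in Kconfig:)

	encodekey128	%eax, %ebx	# wrap the 128-bit AES key in %xmm0 into a handle (output in %xmm0-%xmm2)
	aesenc128kl	(%rdi), %xmm0	# encrypt %xmm0 in place using the 384-bit handle at (%rdi)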


Thread overview: 30+ messages
2020-12-16 17:41 [RFC PATCH 0/8] x86: Support Intel Key Locker Chang S. Bae
2020-12-16 17:41 ` [RFC PATCH 1/8] x86/cpufeature: Enumerate Key Locker feature Chang S. Bae
2020-12-16 17:41 ` [RFC PATCH 2/8] x86/cpu: Load Key Locker internal key at boot-time Chang S. Bae
2020-12-16 17:41 ` [RFC PATCH 3/8] x86/msr-index: Add MSRs for Key Locker internal key Chang S. Bae
2020-12-16 17:41 ` [RFC PATCH 4/8] x86/power: Restore Key Locker internal key from the ACPI S3/4 sleep states Chang S. Bae
2020-12-17 19:10   ` Eric Biggers
2020-12-18  1:00     ` Bae, Chang Seok
2021-01-28 10:34   ` Rafael J. Wysocki
2021-01-28 16:10     ` Bae, Chang Seok
2020-12-16 17:41 ` [RFC PATCH 5/8] x86/cpu: Add a config option and a chicken bit for Key Locker Chang S. Bae
2020-12-16 17:41 ` [RFC PATCH 6/8] selftests/x86: Test Key Locker internal key maintenance Chang S. Bae
2020-12-18  9:59   ` Peter Zijlstra
2020-12-18 10:43     ` Bae, Chang Seok
2020-12-16 17:41 ` [RFC PATCH 7/8] crypto: x86/aes-kl - Support AES algorithm using Key Locker instructions Chang S. Bae
2020-12-17 10:16   ` Ard Biesheuvel
2021-05-14 20:36     ` Bae, Chang Seok
2020-12-17 20:54   ` Andy Lutomirski
2021-05-14 20:48     ` Bae, Chang Seok
2020-12-17 20:58   ` [NEEDS-REVIEW] " Dave Hansen
2020-12-18  9:56     ` Peter Zijlstra
2020-12-18 10:11   ` Peter Zijlstra [this message]
2020-12-18 10:34     ` Bae, Chang Seok
2020-12-18 11:00       ` Borislav Petkov
2020-12-18 14:33       ` Peter Zijlstra
2020-12-16 17:41 ` [RFC PATCH 8/8] x86/cpu: Support the hardware randomization option for Key Locker internal key Chang S. Bae
2020-12-17 19:10 ` [RFC PATCH 0/8] x86: Support Intel Key Locker Eric Biggers
2020-12-17 20:07   ` Dan Williams
2020-12-18  1:08   ` Bae, Chang Seok
2020-12-19 18:59 ` Andy Lutomirski
2020-12-22 19:03   ` Bae, Chang Seok
