All of lore.kernel.org
 help / color / mirror / Atom feed
* linux-next: manual merge of the tip tree with the crypto tree
@ 2020-07-17  4:46 Stephen Rothwell
  2020-07-17  6:27 ` Uros Bizjak
  0 siblings, 1 reply; 16+ messages in thread
From: Stephen Rothwell @ 2020-07-17  4:46 UTC (permalink / raw)
  To: Thomas Gleixner, Ingo Molnar, H. Peter Anvin, Peter Zijlstra,
	Herbert Xu, Linux Crypto List
  Cc: Linux Next Mailing List, Linux Kernel Mailing List, Uros Bizjak,
	Chang S. Bae, Sasha Levin

[-- Attachment #1: Type: text/plain, Size: 4279 bytes --]

Hi all,

Today's linux-next merge of the tip tree got a conflict in:

  arch/x86/include/asm/inst.h

between commit:

  d7866e503bdc ("crypto: x86 - Remove include/asm/inst.h")

from the crypto tree and commit:

  eaad981291ee ("x86/entry/64: Introduce the FIND_PERCPU_BASE macro")

from the tip tree.

I fixed it up (I brought the file back but removed what the crypto tree
no longer needed - see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging.  You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.

I think if the crypto tree brought back this file as well (even without
the RDPID macro), it would make this conflict much more manageable.

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Generate .byte code for some instructions not supported by old
 * binutils.
 */
#ifndef X86_ASM_INST_H
#define X86_ASM_INST_H

#ifdef __ASSEMBLY__

#define REG_NUM_INVALID		100

#define REG_TYPE_R32		0
#define REG_TYPE_R64		1
#define REG_TYPE_XMM		2
#define REG_TYPE_INVALID	100

	.macro R32_NUM opd r32
	\opd = REG_NUM_INVALID
	.ifc \r32,%eax
	\opd = 0
	.endif
	.ifc \r32,%ecx
	\opd = 1
	.endif
	.ifc \r32,%edx
	\opd = 2
	.endif
	.ifc \r32,%ebx
	\opd = 3
	.endif
	.ifc \r32,%esp
	\opd = 4
	.endif
	.ifc \r32,%ebp
	\opd = 5
	.endif
	.ifc \r32,%esi
	\opd = 6
	.endif
	.ifc \r32,%edi
	\opd = 7
	.endif
#ifdef CONFIG_X86_64
	.ifc \r32,%r8d
	\opd = 8
	.endif
	.ifc \r32,%r9d
	\opd = 9
	.endif
	.ifc \r32,%r10d
	\opd = 10
	.endif
	.ifc \r32,%r11d
	\opd = 11
	.endif
	.ifc \r32,%r12d
	\opd = 12
	.endif
	.ifc \r32,%r13d
	\opd = 13
	.endif
	.ifc \r32,%r14d
	\opd = 14
	.endif
	.ifc \r32,%r15d
	\opd = 15
	.endif
#endif
	.endm

	.macro R64_NUM opd r64
	\opd = REG_NUM_INVALID
#ifdef CONFIG_X86_64
	.ifc \r64,%rax
	\opd = 0
	.endif
	.ifc \r64,%rcx
	\opd = 1
	.endif
	.ifc \r64,%rdx
	\opd = 2
	.endif
	.ifc \r64,%rbx
	\opd = 3
	.endif
	.ifc \r64,%rsp
	\opd = 4
	.endif
	.ifc \r64,%rbp
	\opd = 5
	.endif
	.ifc \r64,%rsi
	\opd = 6
	.endif
	.ifc \r64,%rdi
	\opd = 7
	.endif
	.ifc \r64,%r8
	\opd = 8
	.endif
	.ifc \r64,%r9
	\opd = 9
	.endif
	.ifc \r64,%r10
	\opd = 10
	.endif
	.ifc \r64,%r11
	\opd = 11
	.endif
	.ifc \r64,%r12
	\opd = 12
	.endif
	.ifc \r64,%r13
	\opd = 13
	.endif
	.ifc \r64,%r14
	\opd = 14
	.endif
	.ifc \r64,%r15
	\opd = 15
	.endif
#endif
	.endm

	.macro XMM_NUM opd xmm
	\opd = REG_NUM_INVALID
	.ifc \xmm,%xmm0
	\opd = 0
	.endif
	.ifc \xmm,%xmm1
	\opd = 1
	.endif
	.ifc \xmm,%xmm2
	\opd = 2
	.endif
	.ifc \xmm,%xmm3
	\opd = 3
	.endif
	.ifc \xmm,%xmm4
	\opd = 4
	.endif
	.ifc \xmm,%xmm5
	\opd = 5
	.endif
	.ifc \xmm,%xmm6
	\opd = 6
	.endif
	.ifc \xmm,%xmm7
	\opd = 7
	.endif
	.ifc \xmm,%xmm8
	\opd = 8
	.endif
	.ifc \xmm,%xmm9
	\opd = 9
	.endif
	.ifc \xmm,%xmm10
	\opd = 10
	.endif
	.ifc \xmm,%xmm11
	\opd = 11
	.endif
	.ifc \xmm,%xmm12
	\opd = 12
	.endif
	.ifc \xmm,%xmm13
	\opd = 13
	.endif
	.ifc \xmm,%xmm14
	\opd = 14
	.endif
	.ifc \xmm,%xmm15
	\opd = 15
	.endif
	.endm

	.macro REG_TYPE type reg
	R32_NUM reg_type_r32 \reg
	R64_NUM reg_type_r64 \reg
	XMM_NUM reg_type_xmm \reg
	.if reg_type_r64 <> REG_NUM_INVALID
	\type = REG_TYPE_R64
	.elseif reg_type_r32 <> REG_NUM_INVALID
	\type = REG_TYPE_R32
	.elseif reg_type_xmm <> REG_NUM_INVALID
	\type = REG_TYPE_XMM
	.else
	\type = REG_TYPE_INVALID
	.endif
	.endm

	.macro PFX_OPD_SIZE
	.byte 0x66
	.endm

	.macro PFX_REX opd1 opd2 W=0
	.if ((\opd1 | \opd2) & 8) || \W
	.byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
	.endif
	.endm

	.macro MODRM mod opd1 opd2
	.byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)
	.endm

.macro RDPID opd
	REG_TYPE rdpid_opd_type \opd
	.if rdpid_opd_type == REG_TYPE_R64
	R64_NUM rdpid_opd \opd
	.else
	R32_NUM rdpid_opd \opd
	.endif
	.byte 0xf3
	.if rdpid_opd > 7
	PFX_REX rdpid_opd 0
	.endif
	.byte 0x0f, 0xc7
	MODRM 0xc0 rdpid_opd 0x7
.endm
#endif

#endif

-- 
Cheers,
Stephen Rothwell

[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 488 bytes --]

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: linux-next: manual merge of the tip tree with the crypto tree
  2020-07-17  4:46 linux-next: manual merge of the tip tree with the crypto tree Stephen Rothwell
@ 2020-07-17  6:27 ` Uros Bizjak
  2020-07-17  6:44   ` Herbert Xu
  0 siblings, 1 reply; 16+ messages in thread
From: Uros Bizjak @ 2020-07-17  6:27 UTC (permalink / raw)
  To: Stephen Rothwell
  Cc: Thomas Gleixner, Ingo Molnar, H. Peter Anvin, Peter Zijlstra,
	Herbert Xu, Linux Crypto List, Linux Next Mailing List,
	Linux Kernel Mailing List, Chang S. Bae, Sasha Levin

On Fri, Jul 17, 2020 at 6:47 AM Stephen Rothwell <sfr@canb.auug.org.au> wrote:
>
> Hi all,
>
> Today's linux-next merge of the tip tree got a conflict in:
>
>   arch/x86/include/asm/inst.h
>
> between commit:
>
>   d7866e503bdc ("crypto: x86 - Remove include/asm/inst.h")
>
> from the crypto tree and commit:
>
>   eaad981291ee ("x86/entry/64: Introduce the FIND_PERCPU_BASE macro")
>
> from the tip tree.
>
> I fixed it up (I brought the file back but removed what the crypto tree
> no longer needed - see below) and can carry the fix as necessary. This
> is now fixed as far as linux-next is concerned, but any non trivial
> conflicts should be mentioned to your upstream maintainer when your tree
> is submitted for merging.  You may also want to consider cooperating
> with the maintainer of the conflicting tree to minimise any particularly
> complex conflicts.
>
> I think if the crypto tree brought back this file as well (even without
> the RDPID macro), it would make this conflict much more manageable.

I will prepare a v2 that leaves needed part of inst.h.

Uros.

> /* SPDX-License-Identifier: GPL-2.0 */
> /*
>  * Generate .byte code for some instructions not supported by old
>  * binutils.
>  */
> #ifndef X86_ASM_INST_H
> #define X86_ASM_INST_H
>
> #ifdef __ASSEMBLY__
>
> #define REG_NUM_INVALID         100
>
> #define REG_TYPE_R32            0
> #define REG_TYPE_R64            1
> #define REG_TYPE_XMM            2
> #define REG_TYPE_INVALID        100
>
>         .macro R32_NUM opd r32
>         \opd = REG_NUM_INVALID
>         .ifc \r32,%eax
>         \opd = 0
>         .endif
>         .ifc \r32,%ecx
>         \opd = 1
>         .endif
>         .ifc \r32,%edx
>         \opd = 2
>         .endif
>         .ifc \r32,%ebx
>         \opd = 3
>         .endif
>         .ifc \r32,%esp
>         \opd = 4
>         .endif
>         .ifc \r32,%ebp
>         \opd = 5
>         .endif
>         .ifc \r32,%esi
>         \opd = 6
>         .endif
>         .ifc \r32,%edi
>         \opd = 7
>         .endif
> #ifdef CONFIG_X86_64
>         .ifc \r32,%r8d
>         \opd = 8
>         .endif
>         .ifc \r32,%r9d
>         \opd = 9
>         .endif
>         .ifc \r32,%r10d
>         \opd = 10
>         .endif
>         .ifc \r32,%r11d
>         \opd = 11
>         .endif
>         .ifc \r32,%r12d
>         \opd = 12
>         .endif
>         .ifc \r32,%r13d
>         \opd = 13
>         .endif
>         .ifc \r32,%r14d
>         \opd = 14
>         .endif
>         .ifc \r32,%r15d
>         \opd = 15
>         .endif
> #endif
>         .endm
>
>         .macro R64_NUM opd r64
>         \opd = REG_NUM_INVALID
> #ifdef CONFIG_X86_64
>         .ifc \r64,%rax
>         \opd = 0
>         .endif
>         .ifc \r64,%rcx
>         \opd = 1
>         .endif
>         .ifc \r64,%rdx
>         \opd = 2
>         .endif
>         .ifc \r64,%rbx
>         \opd = 3
>         .endif
>         .ifc \r64,%rsp
>         \opd = 4
>         .endif
>         .ifc \r64,%rbp
>         \opd = 5
>         .endif
>         .ifc \r64,%rsi
>         \opd = 6
>         .endif
>         .ifc \r64,%rdi
>         \opd = 7
>         .endif
>         .ifc \r64,%r8
>         \opd = 8
>         .endif
>         .ifc \r64,%r9
>         \opd = 9
>         .endif
>         .ifc \r64,%r10
>         \opd = 10
>         .endif
>         .ifc \r64,%r11
>         \opd = 11
>         .endif
>         .ifc \r64,%r12
>         \opd = 12
>         .endif
>         .ifc \r64,%r13
>         \opd = 13
>         .endif
>         .ifc \r64,%r14
>         \opd = 14
>         .endif
>         .ifc \r64,%r15
>         \opd = 15
>         .endif
> #endif
>         .endm
>
>         .macro XMM_NUM opd xmm
>         \opd = REG_NUM_INVALID
>         .ifc \xmm,%xmm0
>         \opd = 0
>         .endif
>         .ifc \xmm,%xmm1
>         \opd = 1
>         .endif
>         .ifc \xmm,%xmm2
>         \opd = 2
>         .endif
>         .ifc \xmm,%xmm3
>         \opd = 3
>         .endif
>         .ifc \xmm,%xmm4
>         \opd = 4
>         .endif
>         .ifc \xmm,%xmm5
>         \opd = 5
>         .endif
>         .ifc \xmm,%xmm6
>         \opd = 6
>         .endif
>         .ifc \xmm,%xmm7
>         \opd = 7
>         .endif
>         .ifc \xmm,%xmm8
>         \opd = 8
>         .endif
>         .ifc \xmm,%xmm9
>         \opd = 9
>         .endif
>         .ifc \xmm,%xmm10
>         \opd = 10
>         .endif
>         .ifc \xmm,%xmm11
>         \opd = 11
>         .endif
>         .ifc \xmm,%xmm12
>         \opd = 12
>         .endif
>         .ifc \xmm,%xmm13
>         \opd = 13
>         .endif
>         .ifc \xmm,%xmm14
>         \opd = 14
>         .endif
>         .ifc \xmm,%xmm15
>         \opd = 15
>         .endif
>         .endm
>
>         .macro REG_TYPE type reg
>         R32_NUM reg_type_r32 \reg
>         R64_NUM reg_type_r64 \reg
>         XMM_NUM reg_type_xmm \reg
>         .if reg_type_r64 <> REG_NUM_INVALID
>         \type = REG_TYPE_R64
>         .elseif reg_type_r32 <> REG_NUM_INVALID
>         \type = REG_TYPE_R32
>         .elseif reg_type_xmm <> REG_NUM_INVALID
>         \type = REG_TYPE_XMM
>         .else
>         \type = REG_TYPE_INVALID
>         .endif
>         .endm
>
>         .macro PFX_OPD_SIZE
>         .byte 0x66
>         .endm
>
>         .macro PFX_REX opd1 opd2 W=0
>         .if ((\opd1 | \opd2) & 8) || \W
>         .byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
>         .endif
>         .endm
>
>         .macro MODRM mod opd1 opd2
>         .byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)
>         .endm
>
> .macro RDPID opd
>         REG_TYPE rdpid_opd_type \opd
>         .if rdpid_opd_type == REG_TYPE_R64
>         R64_NUM rdpid_opd \opd
>         .else
>         R32_NUM rdpid_opd \opd
>         .endif
>         .byte 0xf3
>         .if rdpid_opd > 7
>         PFX_REX rdpid_opd 0
>         .endif
>         .byte 0x0f, 0xc7
>         MODRM 0xc0 rdpid_opd 0x7
> .endm
> #endif
>
> #endif
>
> --
> Cheers,
> Stephen Rothwell

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: linux-next: manual merge of the tip tree with the crypto tree
  2020-07-17  6:27 ` Uros Bizjak
@ 2020-07-17  6:44   ` Herbert Xu
  2020-07-17  6:56     ` Stephen Rothwell
  2020-07-17  7:31     ` Uros Bizjak
  0 siblings, 2 replies; 16+ messages in thread
From: Herbert Xu @ 2020-07-17  6:44 UTC (permalink / raw)
  To: Uros Bizjak
  Cc: Stephen Rothwell, Thomas Gleixner, Ingo Molnar, H. Peter Anvin,
	Peter Zijlstra, Linux Crypto List, Linux Next Mailing List,
	Linux Kernel Mailing List, Chang S. Bae, Sasha Levin

On Fri, Jul 17, 2020 at 08:27:27AM +0200, Uros Bizjak wrote:
>
> I will prepare a v2 that leaves needed part of inst.h.

Your patch has already been applied.  So please make it an
incremental patch.

Thanks,
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: linux-next: manual merge of the tip tree with the crypto tree
  2020-07-17  6:44   ` Herbert Xu
@ 2020-07-17  6:56     ` Stephen Rothwell
  2020-07-17  7:31     ` Uros Bizjak
  1 sibling, 0 replies; 16+ messages in thread
From: Stephen Rothwell @ 2020-07-17  6:56 UTC (permalink / raw)
  To: Herbert Xu
  Cc: Uros Bizjak, Thomas Gleixner, Ingo Molnar, H. Peter Anvin,
	Peter Zijlstra, Linux Crypto List, Linux Next Mailing List,
	Linux Kernel Mailing List, Chang S. Bae, Sasha Levin

[-- Attachment #1: Type: text/plain, Size: 374 bytes --]

Hi Herbert,

On Fri, 17 Jul 2020 16:44:01 +1000 Herbert Xu <herbert@gondor.apana.org.au> wrote:
>
> On Fri, Jul 17, 2020 at 08:27:27AM +0200, Uros Bizjak wrote:
> >
> > I will prepare a v2 that leaves needed part of inst.h.  
> 
> Your patch has already been applied.  So please make it an
> incremental patch.

Thank you both.

-- 
Cheers,
Stephen Rothwell

[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 488 bytes --]

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: linux-next: manual merge of the tip tree with the crypto tree
  2020-07-17  6:44   ` Herbert Xu
  2020-07-17  6:56     ` Stephen Rothwell
@ 2020-07-17  7:31     ` Uros Bizjak
  2020-07-20  4:03       ` Stephen Rothwell
  2020-07-20 11:59       ` Herbert Xu
  1 sibling, 2 replies; 16+ messages in thread
From: Uros Bizjak @ 2020-07-17  7:31 UTC (permalink / raw)
  To: Herbert Xu
  Cc: Stephen Rothwell, Thomas Gleixner, Ingo Molnar, H. Peter Anvin,
	Peter Zijlstra, Linux Crypto List, Linux Next Mailing List,
	Linux Kernel Mailing List, Chang S. Bae, Sasha Levin

[-- Attachment #1: Type: text/plain, Size: 604 bytes --]

Please find attached the incremental patch that puts back integer
parts of inst.h. This resolves the conflict with the tip tree.

Uros.

On Fri, Jul 17, 2020 at 8:45 AM Herbert Xu <herbert@gondor.apana.org.au> wrote:
>
> On Fri, Jul 17, 2020 at 08:27:27AM +0200, Uros Bizjak wrote:
> >
> > I will prepare a v2 that leaves needed part of inst.h.
>
> Your patch has already been applied.  So please make it an
> incremental patch.
>
> Thanks,
> --
> Email: Herbert Xu <herbert@gondor.apana.org.au>
> Home Page: http://gondor.apana.org.au/~herbert/
> PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

[-- Attachment #2: 0001-crypto-x86-Put-back-integer-parts-of-include-asm-ins.patch --]
[-- Type: text/x-patch, Size: 2886 bytes --]

From 2aed6d5ac4b561093921ffb0d1e4a31d9ad15d9d Mon Sep 17 00:00:00 2001
From: Uros Bizjak <ubizjak@gmail.com>
Date: Fri, 17 Jul 2020 09:24:53 +0200
Subject: [PATCH] crypto: x86 - Put back integer parts of include/asm/inst.h

Resolves conflict with the tip tree.

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
---
 arch/x86/include/asm/inst.h | 148 ++++++++++++++++++++++++++++++++++++
 1 file changed, 148 insertions(+)
 create mode 100644 arch/x86/include/asm/inst.h

diff --git a/arch/x86/include/asm/inst.h b/arch/x86/include/asm/inst.h
new file mode 100644
index 000000000000..438ccd4f3cc4
--- /dev/null
+++ b/arch/x86/include/asm/inst.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Generate .byte code for some instructions not supported by old
+ * binutils.
+ */
+#ifndef X86_ASM_INST_H
+#define X86_ASM_INST_H
+
+#ifdef __ASSEMBLY__
+
+#define REG_NUM_INVALID		100
+
+#define REG_TYPE_R32		0
+#define REG_TYPE_R64		1
+#define REG_TYPE_INVALID	100
+
+	.macro R32_NUM opd r32
+	\opd = REG_NUM_INVALID
+	.ifc \r32,%eax
+	\opd = 0
+	.endif
+	.ifc \r32,%ecx
+	\opd = 1
+	.endif
+	.ifc \r32,%edx
+	\opd = 2
+	.endif
+	.ifc \r32,%ebx
+	\opd = 3
+	.endif
+	.ifc \r32,%esp
+	\opd = 4
+	.endif
+	.ifc \r32,%ebp
+	\opd = 5
+	.endif
+	.ifc \r32,%esi
+	\opd = 6
+	.endif
+	.ifc \r32,%edi
+	\opd = 7
+	.endif
+#ifdef CONFIG_X86_64
+	.ifc \r32,%r8d
+	\opd = 8
+	.endif
+	.ifc \r32,%r9d
+	\opd = 9
+	.endif
+	.ifc \r32,%r10d
+	\opd = 10
+	.endif
+	.ifc \r32,%r11d
+	\opd = 11
+	.endif
+	.ifc \r32,%r12d
+	\opd = 12
+	.endif
+	.ifc \r32,%r13d
+	\opd = 13
+	.endif
+	.ifc \r32,%r14d
+	\opd = 14
+	.endif
+	.ifc \r32,%r15d
+	\opd = 15
+	.endif
+#endif
+	.endm
+
+	.macro R64_NUM opd r64
+	\opd = REG_NUM_INVALID
+#ifdef CONFIG_X86_64
+	.ifc \r64,%rax
+	\opd = 0
+	.endif
+	.ifc \r64,%rcx
+	\opd = 1
+	.endif
+	.ifc \r64,%rdx
+	\opd = 2
+	.endif
+	.ifc \r64,%rbx
+	\opd = 3
+	.endif
+	.ifc \r64,%rsp
+	\opd = 4
+	.endif
+	.ifc \r64,%rbp
+	\opd = 5
+	.endif
+	.ifc \r64,%rsi
+	\opd = 6
+	.endif
+	.ifc \r64,%rdi
+	\opd = 7
+	.endif
+	.ifc \r64,%r8
+	\opd = 8
+	.endif
+	.ifc \r64,%r9
+	\opd = 9
+	.endif
+	.ifc \r64,%r10
+	\opd = 10
+	.endif
+	.ifc \r64,%r11
+	\opd = 11
+	.endif
+	.ifc \r64,%r12
+	\opd = 12
+	.endif
+	.ifc \r64,%r13
+	\opd = 13
+	.endif
+	.ifc \r64,%r14
+	\opd = 14
+	.endif
+	.ifc \r64,%r15
+	\opd = 15
+	.endif
+#endif
+	.endm
+
+	.macro REG_TYPE type reg
+	R32_NUM reg_type_r32 \reg
+	R64_NUM reg_type_r64 \reg
+	.if reg_type_r64 <> REG_NUM_INVALID
+	\type = REG_TYPE_R64
+	.elseif reg_type_r32 <> REG_NUM_INVALID
+	\type = REG_TYPE_R32
+	.else
+	\type = REG_TYPE_INVALID
+	.endif
+	.endm
+
+	.macro PFX_REX opd1 opd2 W=0
+	.if ((\opd1 | \opd2) & 8) || \W
+	.byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3)
+	.endif
+	.endm
+
+	.macro MODRM mod opd1 opd2
+	.byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)
+	.endm
+#endif
+
+#endif
-- 
2.26.2


^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: linux-next: manual merge of the tip tree with the crypto tree
  2020-07-17  7:31     ` Uros Bizjak
@ 2020-07-20  4:03       ` Stephen Rothwell
  2020-07-20  6:13         ` Uros Bizjak
  2020-07-20 11:59       ` Herbert Xu
  1 sibling, 1 reply; 16+ messages in thread
From: Stephen Rothwell @ 2020-07-20  4:03 UTC (permalink / raw)
  To: Uros Bizjak
  Cc: Herbert Xu, Thomas Gleixner, Ingo Molnar, H. Peter Anvin,
	Peter Zijlstra, Linux Crypto List, Linux Next Mailing List,
	Linux Kernel Mailing List, Chang S. Bae, Sasha Levin

[-- Attachment #1: Type: text/plain, Size: 404 bytes --]

Hi Uros,

On Fri, 17 Jul 2020 09:31:18 +0200 Uros Bizjak <ubizjak@gmail.com> wrote:
>
> Please find attached the incremental patch that puts back integer
> parts of inst.h. This resolves the conflict with the tip tree.

The tip tree change needs the XMM parts kept as well, sorry.

So I ended up just removing the actual now unused crypto instruction
macros.

-- 
Cheers,
Stephen Rothwell

[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 488 bytes --]

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: linux-next: manual merge of the tip tree with the crypto tree
  2020-07-20  4:03       ` Stephen Rothwell
@ 2020-07-20  6:13         ` Uros Bizjak
  2020-07-20  6:29           ` Stephen Rothwell
  0 siblings, 1 reply; 16+ messages in thread
From: Uros Bizjak @ 2020-07-20  6:13 UTC (permalink / raw)
  To: Stephen Rothwell
  Cc: Herbert Xu, Thomas Gleixner, Ingo Molnar, H. Peter Anvin,
	Peter Zijlstra, Linux Crypto List, Linux Next Mailing List,
	Linux Kernel Mailing List, Chang S. Bae, Sasha Levin

On Mon, Jul 20, 2020 at 6:03 AM Stephen Rothwell <sfr@canb.auug.org.au> wrote:

> > Please find attached the incremental patch that puts back integer
> > parts of inst.h. This resolves the conflict with the tip tree.
>
> The tip tree change needs the XMM parts kept as well, sorry.

Strange, because I did test my patch with the tip tree from
'origin/master' at commit a282cddefe90c4b21ef2c22a76a7c3ebd3ec6b86 and
the compilation produced the same lonely rdpid %eax in
.altinstr_replacement section. AFAICS, the header is included only for
RDPID macro, where XMM registers are unused.

> So I ended up just removing the actual now unused crypto instruction
> macros.

To avoid any further troubles, this is also OK with me.

Uros.

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: linux-next: manual merge of the tip tree with the crypto tree
  2020-07-20  6:13         ` Uros Bizjak
@ 2020-07-20  6:29           ` Stephen Rothwell
  0 siblings, 0 replies; 16+ messages in thread
From: Stephen Rothwell @ 2020-07-20  6:29 UTC (permalink / raw)
  To: Uros Bizjak
  Cc: Herbert Xu, Thomas Gleixner, Ingo Molnar, H. Peter Anvin,
	Peter Zijlstra, Linux Crypto List, Linux Next Mailing List,
	Linux Kernel Mailing List, Chang S. Bae, Sasha Levin

[-- Attachment #1: Type: text/plain, Size: 1222 bytes --]

Hi Uros,

On Mon, 20 Jul 2020 08:13:51 +0200 Uros Bizjak <ubizjak@gmail.com> wrote:
>
> On Mon, Jul 20, 2020 at 6:03 AM Stephen Rothwell <sfr@canb.auug.org.au> wrote:
> 
> > > Please find attached the incremental patch that puts back integer
> > > parts of inst.h. This resolves the conflict with the tip tree.  
> >
> > The tip tree change needs the XMM parts kept as well, sorry.  
> 
> Strange, because I did test my patch with the tip tree from
> 'origin/master' at commit a282cddefe90c4b21ef2c22a76a7c3ebd3ec6b86 and
> the compilation produced the same lonely rdpid %eax in
> .altinstr_replacement section. AFAICS, the header is included only for
> RDPID macro, where XMM registers are unused.
> 
> > So I ended up just removing the actual now unused crypto instruction
> > macros.  
> 
> To avoid any further troubles, this is also OK with me.

Sorry, I see what happened now.  Since your patch was not in the crypto
tree yet, I did a fixup to the tip tree merge based on your patch, but
did it by hand and didn't remove the XMM bits from the REG_TYPE
macro ...

So your original patch is probably all good (especially since you
actually tested it :-))

-- 
Cheers,
Stephen Rothwell

[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 488 bytes --]

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: linux-next: manual merge of the tip tree with the crypto tree
  2020-07-17  7:31     ` Uros Bizjak
  2020-07-20  4:03       ` Stephen Rothwell
@ 2020-07-20 11:59       ` Herbert Xu
  1 sibling, 0 replies; 16+ messages in thread
From: Herbert Xu @ 2020-07-20 11:59 UTC (permalink / raw)
  To: Uros Bizjak
  Cc: Stephen Rothwell, Thomas Gleixner, Ingo Molnar, H. Peter Anvin,
	Peter Zijlstra, Linux Crypto List, Linux Next Mailing List,
	Linux Kernel Mailing List, Chang S. Bae, Sasha Levin

On Fri, Jul 17, 2020 at 09:31:18AM +0200, Uros Bizjak wrote:
> Please find attached the incremental patch that puts back integer
> parts of inst.h. This resolves the conflict with the tip tree.

Sorry but you can't send a patch without changing the Subject line
as otherwise patchwork will simply treat it as a comment or just
ignore it.

Please resend.

Thanks,
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

^ permalink raw reply	[flat|nested] 16+ messages in thread

* linux-next: manual merge of the tip tree with the crypto tree
@ 2022-11-28  1:29 Stephen Rothwell
  0 siblings, 0 replies; 16+ messages in thread
From: Stephen Rothwell @ 2022-11-28  1:29 UTC (permalink / raw)
  To: Thomas Gleixner, Ingo Molnar, H. Peter Anvin, Peter Zijlstra, Herbert Xu
  Cc: Linux Crypto List, Eric Biggers, Linux Kernel Mailing List,
	Linux Next Mailing List

[-- Attachment #1: Type: text/plain, Size: 6780 bytes --]

Hi all,

Today's linux-next merge of the tip tree got conflicts in:

  arch/x86/crypto/sha1_ni_asm.S
  arch/x86/crypto/sha256-avx-asm.S
  arch/x86/crypto/sha256-avx2-asm.S
  arch/x86/crypto/sha256-ssse3-asm.S
  arch/x86/crypto/sha256_ni_asm.S
  arch/x86/crypto/sm3-avx-asm_64.S
  arch/x86/crypto/sm4-aesni-avx-asm_64.S
  arch/x86/crypto/sm4-aesni-avx2-asm_64.S

between commits:

  32f34bf7e44e ("crypto: x86/sha1 - fix possible crash with CFI enabled")
  19940ebbb59c ("crypto: x86/sha256 - fix possible crash with CFI enabled")
  8ba490d9f5a5 ("crypto: x86/sm3 - fix possible crash with CFI enabled")
  2d203c46a0fa ("crypto: x86/sm4 - fix crash with CFI enabled")

from the crypto tree and commits:

  c2a3ce6fdb12 ("crypto: x86/sha1: Remove custom alignments")
  3ba56d0b8711 ("crypto: x86/sha256: Remove custom alignments")
  2f93238b87dd ("crypto: x86/sm[34]: Remove redundant alignments")

from the tip tree.

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging.  You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.

-- 
Cheers,
Stephen Rothwell

diff --cc arch/x86/crypto/sha1_ni_asm.S
index 3cae5a1bb3d6,cd943b2af2c4..000000000000
--- a/arch/x86/crypto/sha1_ni_asm.S
+++ b/arch/x86/crypto/sha1_ni_asm.S
@@@ -93,8 -92,7 +93,7 @@@
   * numBlocks: Number of blocks to process
   */
  .text
- .align 32
 -SYM_FUNC_START(sha1_ni_transform)
 +SYM_TYPED_FUNC_START(sha1_ni_transform)
  	push		%rbp
  	mov		%rsp, %rbp
  	sub		$FRAME_SIZE, %rsp
diff --cc arch/x86/crypto/sha256-avx-asm.S
index 06ea30c20828,3649370690c5..000000000000
--- a/arch/x86/crypto/sha256-avx-asm.S
+++ b/arch/x86/crypto/sha256-avx-asm.S
@@@ -347,8 -346,7 +347,7 @@@ a = TMP
  ## arg 3 : Num blocks
  ########################################################################
  .text
 -SYM_FUNC_START(sha256_transform_avx)
 +SYM_TYPED_FUNC_START(sha256_transform_avx)
- .align 32
  	pushq   %rbx
  	pushq   %r12
  	pushq   %r13
diff --cc arch/x86/crypto/sha256-avx2-asm.S
index 2d2be531a11e,c4c1dc5ee078..000000000000
--- a/arch/x86/crypto/sha256-avx2-asm.S
+++ b/arch/x86/crypto/sha256-avx2-asm.S
@@@ -524,8 -523,7 +524,7 @@@ STACK_SIZE	= _CTX      + _CTX_SIZ
  ## arg 3 : Num blocks
  ########################################################################
  .text
 -SYM_FUNC_START(sha256_transform_rorx)
 +SYM_TYPED_FUNC_START(sha256_transform_rorx)
- .align 32
  	pushq	%rbx
  	pushq	%r12
  	pushq	%r13
diff --cc arch/x86/crypto/sha256-ssse3-asm.S
index 7db28839108d,96b7dcdeaebe..000000000000
--- a/arch/x86/crypto/sha256-ssse3-asm.S
+++ b/arch/x86/crypto/sha256-ssse3-asm.S
@@@ -356,8 -355,7 +356,7 @@@ a = TMP
  ## arg 3 : Num blocks
  ########################################################################
  .text
 -SYM_FUNC_START(sha256_transform_ssse3)
 +SYM_TYPED_FUNC_START(sha256_transform_ssse3)
- .align 32
  	pushq   %rbx
  	pushq   %r12
  	pushq   %r13
diff --cc arch/x86/crypto/sha256_ni_asm.S
index 47f93937f798,b3f1a1a12027..000000000000
--- a/arch/x86/crypto/sha256_ni_asm.S
+++ b/arch/x86/crypto/sha256_ni_asm.S
@@@ -97,8 -96,7 +97,7 @@@
   */
  
  .text
- .align 32
 -SYM_FUNC_START(sha256_ni_transform)
 +SYM_TYPED_FUNC_START(sha256_ni_transform)
  
  	shl		$6, NUM_BLKS		/*  convert to bytes */
  	jz		.Ldone_hash
diff --cc arch/x86/crypto/sm3-avx-asm_64.S
index 8fc5ac681fd6,b28d804ee10d..000000000000
--- a/arch/x86/crypto/sm3-avx-asm_64.S
+++ b/arch/x86/crypto/sm3-avx-asm_64.S
@@@ -328,8 -327,7 +328,7 @@@
   * void sm3_transform_avx(struct sm3_state *state,
   *                        const u8 *data, int nblocks);
   */
- .align 16
 -SYM_FUNC_START(sm3_transform_avx)
 +SYM_TYPED_FUNC_START(sm3_transform_avx)
  	/* input:
  	 *	%rdi: ctx, CTX
  	 *	%rsi: data (64*nblks bytes)
diff --cc arch/x86/crypto/sm4-aesni-avx-asm_64.S
index 22b6560eb9e1,e13c8537b2ec..000000000000
--- a/arch/x86/crypto/sm4-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/sm4-aesni-avx-asm_64.S
@@@ -420,8 -415,7 +416,7 @@@ SYM_FUNC_END(sm4_aesni_avx_crypt8
   * void sm4_aesni_avx_ctr_enc_blk8(const u32 *rk, u8 *dst,
   *                                 const u8 *src, u8 *iv)
   */
- .align 8
 -SYM_FUNC_START(sm4_aesni_avx_ctr_enc_blk8)
 +SYM_TYPED_FUNC_START(sm4_aesni_avx_ctr_enc_blk8)
  	/* input:
  	 *	%rdi: round key array, CTX
  	 *	%rsi: dst (8 blocks)
@@@ -495,8 -489,7 +490,7 @@@ SYM_FUNC_END(sm4_aesni_avx_ctr_enc_blk8
   * void sm4_aesni_avx_cbc_dec_blk8(const u32 *rk, u8 *dst,
   *                                 const u8 *src, u8 *iv)
   */
- .align 8
 -SYM_FUNC_START(sm4_aesni_avx_cbc_dec_blk8)
 +SYM_TYPED_FUNC_START(sm4_aesni_avx_cbc_dec_blk8)
  	/* input:
  	 *	%rdi: round key array, CTX
  	 *	%rsi: dst (8 blocks)
@@@ -545,8 -538,7 +539,7 @@@ SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8
   * void sm4_aesni_avx_cfb_dec_blk8(const u32 *rk, u8 *dst,
   *                                 const u8 *src, u8 *iv)
   */
- .align 8
 -SYM_FUNC_START(sm4_aesni_avx_cfb_dec_blk8)
 +SYM_TYPED_FUNC_START(sm4_aesni_avx_cfb_dec_blk8)
  	/* input:
  	 *	%rdi: round key array, CTX
  	 *	%rsi: dst (8 blocks)
diff --cc arch/x86/crypto/sm4-aesni-avx2-asm_64.S
index 23ee39a8ada8,2212705f7da6..000000000000
--- a/arch/x86/crypto/sm4-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/sm4-aesni-avx2-asm_64.S
@@@ -282,8 -278,7 +279,7 @@@ SYM_FUNC_END(__sm4_crypt_blk16
   * void sm4_aesni_avx2_ctr_enc_blk16(const u32 *rk, u8 *dst,
   *                                   const u8 *src, u8 *iv)
   */
- .align 8
 -SYM_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16)
 +SYM_TYPED_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16)
  	/* input:
  	 *	%rdi: round key array, CTX
  	 *	%rsi: dst (16 blocks)
@@@ -395,8 -390,7 +391,7 @@@ SYM_FUNC_END(sm4_aesni_avx2_ctr_enc_blk
   * void sm4_aesni_avx2_cbc_dec_blk16(const u32 *rk, u8 *dst,
   *                                   const u8 *src, u8 *iv)
   */
- .align 8
 -SYM_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16)
 +SYM_TYPED_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16)
  	/* input:
  	 *	%rdi: round key array, CTX
  	 *	%rsi: dst (16 blocks)
@@@ -449,8 -443,7 +444,7 @@@ SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk
   * void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst,
   *                                   const u8 *src, u8 *iv)
   */
- .align 8
 -SYM_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16)
 +SYM_TYPED_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16)
  	/* input:
  	 *	%rdi: round key array, CTX
  	 *	%rsi: dst (16 blocks)

[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 488 bytes --]

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: linux-next: manual merge of the tip tree with the crypto tree
  2020-07-21  4:28 Stephen Rothwell
@ 2020-08-03 22:14 ` Stephen Rothwell
  0 siblings, 0 replies; 16+ messages in thread
From: Stephen Rothwell @ 2020-08-03 22:14 UTC (permalink / raw)
  To: Thomas Gleixner, Ingo Molnar, H. Peter Anvin, Peter Zijlstra,
	Herbert Xu, Linux Crypto List
  Cc: Linux Next Mailing List, Linux Kernel Mailing List, Chang S. Bae,
	Sasha Levin, Uros Bizjak

[-- Attachment #1: Type: text/plain, Size: 4608 bytes --]

Hi all,

On Tue, 21 Jul 2020 14:28:45 +1000 Stephen Rothwell <sfr@canb.auug.org.au> wrote:
>
> Today's linux-next merge of the tip tree got a conflict in:
> 
>   arch/x86/include/asm/inst.h
> 
> between commit:
> 
>   d7866e503bdc ("crypto: x86 - Remove include/asm/inst.h")
> (also "crypto: x86 - Put back integer parts of include/asm/inst.h"
> which I have added to the crypto tree merge today)
> 
> from the crypto tree and commit:
> 
>   eaad981291ee ("x86/entry/64: Introduce the FIND_PERCPU_BASE macro")
> 
> from the tip tree.
> 
> I fixed it up (see below) and can carry the fix as necessary. This
> is now fixed as far as linux-next is concerned, but any non trivial
> conflicts should be mentioned to your upstream maintainer when your tree
> is submitted for merging.  You may also want to consider cooperating
> with the maintainer of the conflicting tree to minimise any particularly
> complex conflicts.
> 
> -- 
> Cheers,
> Stephen Rothwell
> 
> diff --cc arch/x86/include/asm/inst.h
> index 438ccd4f3cc4,d063841a17e3..000000000000
> --- a/arch/x86/include/asm/inst.h
> +++ b/arch/x86/include/asm/inst.h
> @@@ -143,6 -203,124 +143,21 @@@
>   	.macro MODRM mod opd1 opd2
>   	.byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)
>   	.endm
> + 
>  -	.macro PSHUFB_XMM xmm1 xmm2
>  -	XMM_NUM pshufb_opd1 \xmm1
>  -	XMM_NUM pshufb_opd2 \xmm2
>  -	PFX_OPD_SIZE
>  -	PFX_REX pshufb_opd1 pshufb_opd2
>  -	.byte 0x0f, 0x38, 0x00
>  -	MODRM 0xc0 pshufb_opd1 pshufb_opd2
>  -	.endm
>  -
>  -	.macro PCLMULQDQ imm8 xmm1 xmm2
>  -	XMM_NUM clmul_opd1 \xmm1
>  -	XMM_NUM clmul_opd2 \xmm2
>  -	PFX_OPD_SIZE
>  -	PFX_REX clmul_opd1 clmul_opd2
>  -	.byte 0x0f, 0x3a, 0x44
>  -	MODRM 0xc0 clmul_opd1 clmul_opd2
>  -	.byte \imm8
>  -	.endm
>  -
>  -	.macro PEXTRD imm8 xmm gpr
>  -	R32_NUM extrd_opd1 \gpr
>  -	XMM_NUM extrd_opd2 \xmm
>  -	PFX_OPD_SIZE
>  -	PFX_REX extrd_opd1 extrd_opd2
>  -	.byte 0x0f, 0x3a, 0x16
>  -	MODRM 0xc0 extrd_opd1 extrd_opd2
>  -	.byte \imm8
>  -	.endm
>  -
>  -	.macro AESKEYGENASSIST rcon xmm1 xmm2
>  -	XMM_NUM aeskeygen_opd1 \xmm1
>  -	XMM_NUM aeskeygen_opd2 \xmm2
>  -	PFX_OPD_SIZE
>  -	PFX_REX aeskeygen_opd1 aeskeygen_opd2
>  -	.byte 0x0f, 0x3a, 0xdf
>  -	MODRM 0xc0 aeskeygen_opd1 aeskeygen_opd2
>  -	.byte \rcon
>  -	.endm
>  -
>  -	.macro AESIMC xmm1 xmm2
>  -	XMM_NUM aesimc_opd1 \xmm1
>  -	XMM_NUM aesimc_opd2 \xmm2
>  -	PFX_OPD_SIZE
>  -	PFX_REX aesimc_opd1 aesimc_opd2
>  -	.byte 0x0f, 0x38, 0xdb
>  -	MODRM 0xc0 aesimc_opd1 aesimc_opd2
>  -	.endm
>  -
>  -	.macro AESENC xmm1 xmm2
>  -	XMM_NUM aesenc_opd1 \xmm1
>  -	XMM_NUM aesenc_opd2 \xmm2
>  -	PFX_OPD_SIZE
>  -	PFX_REX aesenc_opd1 aesenc_opd2
>  -	.byte 0x0f, 0x38, 0xdc
>  -	MODRM 0xc0 aesenc_opd1 aesenc_opd2
>  -	.endm
>  -
>  -	.macro AESENCLAST xmm1 xmm2
>  -	XMM_NUM aesenclast_opd1 \xmm1
>  -	XMM_NUM aesenclast_opd2 \xmm2
>  -	PFX_OPD_SIZE
>  -	PFX_REX aesenclast_opd1 aesenclast_opd2
>  -	.byte 0x0f, 0x38, 0xdd
>  -	MODRM 0xc0 aesenclast_opd1 aesenclast_opd2
>  -	.endm
>  -
>  -	.macro AESDEC xmm1 xmm2
>  -	XMM_NUM aesdec_opd1 \xmm1
>  -	XMM_NUM aesdec_opd2 \xmm2
>  -	PFX_OPD_SIZE
>  -	PFX_REX aesdec_opd1 aesdec_opd2
>  -	.byte 0x0f, 0x38, 0xde
>  -	MODRM 0xc0 aesdec_opd1 aesdec_opd2
>  -	.endm
>  -
>  -	.macro AESDECLAST xmm1 xmm2
>  -	XMM_NUM aesdeclast_opd1 \xmm1
>  -	XMM_NUM aesdeclast_opd2 \xmm2
>  -	PFX_OPD_SIZE
>  -	PFX_REX aesdeclast_opd1 aesdeclast_opd2
>  -	.byte 0x0f, 0x38, 0xdf
>  -	MODRM 0xc0 aesdeclast_opd1 aesdeclast_opd2
>  -	.endm
>  -
>  -	.macro MOVQ_R64_XMM opd1 opd2
>  -	REG_TYPE movq_r64_xmm_opd1_type \opd1
>  -	.if movq_r64_xmm_opd1_type == REG_TYPE_XMM
>  -	XMM_NUM movq_r64_xmm_opd1 \opd1
>  -	R64_NUM movq_r64_xmm_opd2 \opd2
>  -	.else
>  -	R64_NUM movq_r64_xmm_opd1 \opd1
>  -	XMM_NUM movq_r64_xmm_opd2 \opd2
>  -	.endif
>  -	PFX_OPD_SIZE
>  -	PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1
>  -	.if movq_r64_xmm_opd1_type == REG_TYPE_XMM
>  -	.byte 0x0f, 0x7e
>  -	.else
>  -	.byte 0x0f, 0x6e
>  -	.endif
>  -	MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2
>  -	.endm
>  -
> + .macro RDPID opd
> + 	REG_TYPE rdpid_opd_type \opd
> + 	.if rdpid_opd_type == REG_TYPE_R64
> + 	R64_NUM rdpid_opd \opd
> + 	.else
> + 	R32_NUM rdpid_opd \opd
> + 	.endif
> + 	.byte 0xf3
> + 	.if rdpid_opd > 7
> + 	PFX_REX rdpid_opd 0
> + 	.endif
> + 	.byte 0x0f, 0xc7
> + 	MODRM 0xc0 rdpid_opd 0x7
> + .endm
>   #endif
>   
>   #endif

This is now a conflict between the tip tree and Linus' tree.

-- 
Cheers,
Stephen Rothwell

[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 488 bytes --]

^ permalink raw reply	[flat|nested] 16+ messages in thread

* linux-next: manual merge of the tip tree with the crypto tree
@ 2020-07-21  4:28 Stephen Rothwell
  2020-08-03 22:14 ` Stephen Rothwell
  0 siblings, 1 reply; 16+ messages in thread
From: Stephen Rothwell @ 2020-07-21  4:28 UTC (permalink / raw)
  To: Thomas Gleixner, Ingo Molnar, H. Peter Anvin, Peter Zijlstra,
	Herbert Xu, Linux Crypto List
  Cc: Linux Next Mailing List, Linux Kernel Mailing List, Chang S. Bae,
	Sasha Levin, Uros Bizjak

[-- Attachment #1: Type: text/plain, Size: 4112 bytes --]

Hi all,

Today's linux-next merge of the tip tree got a conflict in:

  arch/x86/include/asm/inst.h

between commit:

  d7866e503bdc ("crypto: x86 - Remove include/asm/inst.h")
(also "crypto: x86 - Put back integer parts of include/asm/inst.h"
which I have added to the crypto tree merge today)

from the crypto tree and commit:

  eaad981291ee ("x86/entry/64: Introduce the FIND_PERCPU_BASE macro")

from the tip tree.

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging.  You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.

-- 
Cheers,
Stephen Rothwell

diff --cc arch/x86/include/asm/inst.h
index 438ccd4f3cc4,d063841a17e3..000000000000
--- a/arch/x86/include/asm/inst.h
+++ b/arch/x86/include/asm/inst.h
@@@ -143,6 -203,124 +143,21 @@@
  	.macro MODRM mod opd1 opd2
  	.byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3)
  	.endm
+ 
 -	.macro PSHUFB_XMM xmm1 xmm2
 -	XMM_NUM pshufb_opd1 \xmm1
 -	XMM_NUM pshufb_opd2 \xmm2
 -	PFX_OPD_SIZE
 -	PFX_REX pshufb_opd1 pshufb_opd2
 -	.byte 0x0f, 0x38, 0x00
 -	MODRM 0xc0 pshufb_opd1 pshufb_opd2
 -	.endm
 -
 -	.macro PCLMULQDQ imm8 xmm1 xmm2
 -	XMM_NUM clmul_opd1 \xmm1
 -	XMM_NUM clmul_opd2 \xmm2
 -	PFX_OPD_SIZE
 -	PFX_REX clmul_opd1 clmul_opd2
 -	.byte 0x0f, 0x3a, 0x44
 -	MODRM 0xc0 clmul_opd1 clmul_opd2
 -	.byte \imm8
 -	.endm
 -
 -	.macro PEXTRD imm8 xmm gpr
 -	R32_NUM extrd_opd1 \gpr
 -	XMM_NUM extrd_opd2 \xmm
 -	PFX_OPD_SIZE
 -	PFX_REX extrd_opd1 extrd_opd2
 -	.byte 0x0f, 0x3a, 0x16
 -	MODRM 0xc0 extrd_opd1 extrd_opd2
 -	.byte \imm8
 -	.endm
 -
 -	.macro AESKEYGENASSIST rcon xmm1 xmm2
 -	XMM_NUM aeskeygen_opd1 \xmm1
 -	XMM_NUM aeskeygen_opd2 \xmm2
 -	PFX_OPD_SIZE
 -	PFX_REX aeskeygen_opd1 aeskeygen_opd2
 -	.byte 0x0f, 0x3a, 0xdf
 -	MODRM 0xc0 aeskeygen_opd1 aeskeygen_opd2
 -	.byte \rcon
 -	.endm
 -
 -	.macro AESIMC xmm1 xmm2
 -	XMM_NUM aesimc_opd1 \xmm1
 -	XMM_NUM aesimc_opd2 \xmm2
 -	PFX_OPD_SIZE
 -	PFX_REX aesimc_opd1 aesimc_opd2
 -	.byte 0x0f, 0x38, 0xdb
 -	MODRM 0xc0 aesimc_opd1 aesimc_opd2
 -	.endm
 -
 -	.macro AESENC xmm1 xmm2
 -	XMM_NUM aesenc_opd1 \xmm1
 -	XMM_NUM aesenc_opd2 \xmm2
 -	PFX_OPD_SIZE
 -	PFX_REX aesenc_opd1 aesenc_opd2
 -	.byte 0x0f, 0x38, 0xdc
 -	MODRM 0xc0 aesenc_opd1 aesenc_opd2
 -	.endm
 -
 -	.macro AESENCLAST xmm1 xmm2
 -	XMM_NUM aesenclast_opd1 \xmm1
 -	XMM_NUM aesenclast_opd2 \xmm2
 -	PFX_OPD_SIZE
 -	PFX_REX aesenclast_opd1 aesenclast_opd2
 -	.byte 0x0f, 0x38, 0xdd
 -	MODRM 0xc0 aesenclast_opd1 aesenclast_opd2
 -	.endm
 -
 -	.macro AESDEC xmm1 xmm2
 -	XMM_NUM aesdec_opd1 \xmm1
 -	XMM_NUM aesdec_opd2 \xmm2
 -	PFX_OPD_SIZE
 -	PFX_REX aesdec_opd1 aesdec_opd2
 -	.byte 0x0f, 0x38, 0xde
 -	MODRM 0xc0 aesdec_opd1 aesdec_opd2
 -	.endm
 -
 -	.macro AESDECLAST xmm1 xmm2
 -	XMM_NUM aesdeclast_opd1 \xmm1
 -	XMM_NUM aesdeclast_opd2 \xmm2
 -	PFX_OPD_SIZE
 -	PFX_REX aesdeclast_opd1 aesdeclast_opd2
 -	.byte 0x0f, 0x38, 0xdf
 -	MODRM 0xc0 aesdeclast_opd1 aesdeclast_opd2
 -	.endm
 -
 -	.macro MOVQ_R64_XMM opd1 opd2
 -	REG_TYPE movq_r64_xmm_opd1_type \opd1
 -	.if movq_r64_xmm_opd1_type == REG_TYPE_XMM
 -	XMM_NUM movq_r64_xmm_opd1 \opd1
 -	R64_NUM movq_r64_xmm_opd2 \opd2
 -	.else
 -	R64_NUM movq_r64_xmm_opd1 \opd1
 -	XMM_NUM movq_r64_xmm_opd2 \opd2
 -	.endif
 -	PFX_OPD_SIZE
 -	PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1
 -	.if movq_r64_xmm_opd1_type == REG_TYPE_XMM
 -	.byte 0x0f, 0x7e
 -	.else
 -	.byte 0x0f, 0x6e
 -	.endif
 -	MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2
 -	.endm
 -
+ .macro RDPID opd
+ 	REG_TYPE rdpid_opd_type \opd
+ 	.if rdpid_opd_type == REG_TYPE_R64
+ 	R64_NUM rdpid_opd \opd
+ 	.else
+ 	R32_NUM rdpid_opd \opd
+ 	.endif
+ 	.byte 0xf3
+ 	.if rdpid_opd > 7
+ 	PFX_REX rdpid_opd 0
+ 	.endif
+ 	.byte 0x0f, 0xc7
+ 	MODRM 0xc0 rdpid_opd 0x7
+ .endm
  #endif
  
  #endif

[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 488 bytes --]

^ permalink raw reply	[flat|nested] 16+ messages in thread

* linux-next: manual merge of the tip tree with the crypto tree
@ 2017-11-08  2:21 Stephen Rothwell
  0 siblings, 0 replies; 16+ messages in thread
From: Stephen Rothwell @ 2017-11-08  2:21 UTC (permalink / raw)
  To: Thomas Gleixner, Ingo Molnar, H. Peter Anvin, Peter Zijlstra, Herbert Xu
  Cc: Linux-Next Mailing List, Linux Kernel Mailing List, Kees Cook,
	Boris BREZILLON

Hi all,

Today's linux-next merge of the tip tree got a conflict in:

  drivers/crypto/mv_cesa.c

between commit:

  27b43fd95b14 ("crypto: marvell - Remove the old mv_cesa driver")

from the crypto tree and commit:

  f34d8d506eef ("crypto: Convert timers to use timer_setup()")

from the tip tree.

I fixed it up (I removed the file again) and can carry the fix as
necessary. This is now fixed as far as linux-next is concerned, but any
non trivial conflicts should be mentioned to your upstream maintainer
when your tree is submitted for merging.  You may also want to consider
cooperating with the maintainer of the conflicting tree to minimise any
particularly complex conflicts.

-- 
Cheers,
Stephen Rothwell

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: linux-next: manual merge of the tip tree with the crypto tree
  2015-10-12  3:24 Stephen Rothwell
@ 2015-10-12  6:18 ` Herbert Xu
  0 siblings, 0 replies; 16+ messages in thread
From: Herbert Xu @ 2015-10-12  6:18 UTC (permalink / raw)
  To: Stephen Rothwell
  Cc: Thomas Gleixner, Ingo Molnar, H. Peter Anvin, Peter Zijlstra,
	linux-next, linux-kernel, tim, Andy Lutomirski

On Mon, Oct 12, 2015 at 02:24:21PM +1100, Stephen Rothwell wrote:
> Hi all,
> 
> Today's linux-next merge of the tip tree got a conflict in:
> 
>   arch/x86/Makefile
> 
> between commit:
> 
>   e38b6b7fcfd1 ("crypto: x86/sha - Add build support for Intel SHA Extensions optimized SHA1 and SHA256")
> 
> from the crypto tree and commit:
> 
>   7b956f035a9e ("x86/asm: Re-add parts of the manual CFI infrastructure")
> 
> from the tip tree.
> 
> I fixed it up (see below) and can carry the fix as necessary (no action
> is required).

Thanks Stephen.
-- 
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

^ permalink raw reply	[flat|nested] 16+ messages in thread

* linux-next: manual merge of the tip tree with the crypto tree
@ 2015-10-12  3:24 Stephen Rothwell
  2015-10-12  6:18 ` Herbert Xu
  0 siblings, 1 reply; 16+ messages in thread
From: Stephen Rothwell @ 2015-10-12  3:24 UTC (permalink / raw)
  To: Thomas Gleixner, Ingo Molnar, H. Peter Anvin, Peter Zijlstra, Herbert Xu
  Cc: linux-next, linux-kernel, tim, Andy Lutomirski

Hi all,

Today's linux-next merge of the tip tree got a conflict in:

  arch/x86/Makefile

between commit:

  e38b6b7fcfd1 ("crypto: x86/sha - Add build support for Intel SHA Extensions optimized SHA1 and SHA256")

from the crypto tree and commit:

  7b956f035a9e ("x86/asm: Re-add parts of the manual CFI infrastructure")

from the tip tree.

I fixed it up (see below) and can carry the fix as necessary (no action
is required).

-- 
Cheers,
Stephen Rothwell                    sfr@canb.auug.org.au

diff --cc arch/x86/Makefile
index a8009c77918a,2dfaa72260b4..000000000000
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@@ -165,11 -171,9 +171,11 @@@ asinstr += $(call as-instr,pshufb %xmm0
  asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
  avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
  avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
 +sha1_ni_instr :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA1_NI=1)
 +sha256_ni_instr :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA256_NI=1)
  
- KBUILD_AFLAGS += $(asinstr) $(avx_instr) $(avx2_instr) $(sha1_ni_instr) $(sha256_ni_instr)
- KBUILD_CFLAGS += $(asinstr) $(avx_instr) $(avx2_instr) $(sha1_ni_instr) $(sha256_ni_instr)
 -KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
 -KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
++KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(sha1_ni_instr) $(sha256_ni_instr)
++KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(sha1_ni_instr) $(sha256_ni_instr)
  
  LDFLAGS := -m elf_$(UTS_MACHINE)
  

^ permalink raw reply	[flat|nested] 16+ messages in thread

* linux-next: manual merge of the tip tree with the crypto tree
@ 2015-06-17  3:21 Michael Ellerman
  0 siblings, 0 replies; 16+ messages in thread
From: Michael Ellerman @ 2015-06-17  3:21 UTC (permalink / raw)
  To: Thomas Gleixner, Ingo Molnar, H. Peter Anvin, Peter Zijlstra, Herbert Xu
  Cc: linux-next, linux-kernel, David Hildenbrand, Benjamin Herrenschmidt

Hi all,

Today's linux-next merge of the tip tree got conflicts in:

  drivers/crypto/vmx/aes.c
  drivers/crypto/vmx/aes_cbc.c
  drivers/crypto/vmx/ghash.c

between commit:

  4beb10604597 "crypto: vmx - Reindent to kernel style"

from the crypto tree and commit:

  5f76eea88dcb "sched/preempt, powerpc: Disable preemption in enable_kernel_altivec() explicitly"

from the tip tree.

I fixed it up (see below) and can carry the fix as necessary (no action
is required).

cheers


diff --cc drivers/crypto/vmx/aes.c
index 023e5f014783,a9064e36e7b5..000000000000
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@@ -76,47 -73,53 +76,53 @@@ static void p8_aes_exit(struct crypto_t
  }
  
  static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
 -    unsigned int keylen)
 +			 unsigned int keylen)
  {
 -    int ret;
 -    struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 -
 -    preempt_disable();
 -    pagefault_disable();
 -    enable_kernel_altivec();
 -    ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 -    ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
 -    pagefault_enable();
 -    preempt_enable();
 -
 -    ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
 -    return ret;
 +	int ret;
 +	struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 +
++	preempt_disable();
 +	pagefault_disable();
 +	enable_kernel_altivec();
 +	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 +	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
 +	pagefault_enable();
++	preempt_enable();
 +
 +	ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
 +	return ret;
  }
  
  static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
  {
 -    struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 -
 -    if (in_interrupt()) {
 -        crypto_cipher_encrypt_one(ctx->fallback, dst, src);
 -    } else {
 -	preempt_disable();
 -        pagefault_disable();
 -        enable_kernel_altivec();
 -        aes_p8_encrypt(src, dst, &ctx->enc_key);
 -        pagefault_enable();
 -	preempt_enable();
 -    }
 +	struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 +
 +	if (in_interrupt()) {
 +		crypto_cipher_encrypt_one(ctx->fallback, dst, src);
 +	} else {
++		preempt_disable();
 +		pagefault_disable();
 +		enable_kernel_altivec();
 +		aes_p8_encrypt(src, dst, &ctx->enc_key);
 +		pagefault_enable();
++		preempt_enable();
 +	}
  }
  
  static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
  {
 -    struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 -
 -    if (in_interrupt()) {
 -        crypto_cipher_decrypt_one(ctx->fallback, dst, src);
 -    } else {
 -	preempt_disable();
 -        pagefault_disable();
 -        enable_kernel_altivec();
 -        aes_p8_decrypt(src, dst, &ctx->dec_key);
 -        pagefault_enable();
 -	preempt_enable();
 -    }
 +	struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 +
 +	if (in_interrupt()) {
 +		crypto_cipher_decrypt_one(ctx->fallback, dst, src);
 +	} else {
++		preempt_disable();
 +		pagefault_disable();
 +		enable_kernel_altivec();
 +		aes_p8_decrypt(src, dst, &ctx->dec_key);
 +		pagefault_enable();
++		preempt_enable();
 +	}
  }
  
  struct crypto_alg p8_aes_alg = {
diff --cc drivers/crypto/vmx/aes_cbc.c
index 7120ab24d8c6,477284abdd11..000000000000
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@@ -77,95 -74,95 +77,101 @@@ static void p8_aes_cbc_exit(struct cryp
  }
  
  static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
 -    unsigned int keylen)
 +			     unsigned int keylen)
  {
 -    int ret;
 -    struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
 -
 -    preempt_disable();
 -    pagefault_disable();
 -    enable_kernel_altivec();
 -    ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 -    ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
 -    pagefault_enable();
 -    preempt_enable();
 -
 -    ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
 -    return ret;
 +	int ret;
 +	struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
 +
++	preempt_disable();
 +	pagefault_disable();
 +	enable_kernel_altivec();
 +	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 +	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
 +	pagefault_enable();
++	preempt_enable();
 +
 +	ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
 +	return ret;
  }
  
  static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
 -    struct scatterlist *dst, struct scatterlist *src,
 -    unsigned int nbytes)
 +			      struct scatterlist *dst,
 +			      struct scatterlist *src, unsigned int nbytes)
  {
 -    int ret;
 -    struct blkcipher_walk walk;
 -    struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(
 -            crypto_blkcipher_tfm(desc->tfm));
 -    struct blkcipher_desc fallback_desc = {
 -        .tfm = ctx->fallback,
 -        .info = desc->info,
 -        .flags = desc->flags
 -    };
 -
 -    if (in_interrupt()) {
 -        ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes);
 -    } else {
 -	preempt_disable();
 -        pagefault_disable();
 -        enable_kernel_altivec();
 -
 -	blkcipher_walk_init(&walk, dst, src, nbytes);
 -        ret = blkcipher_walk_virt(desc, &walk);
 -        while ((nbytes = walk.nbytes)) {
 -			aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
 -				nbytes & AES_BLOCK_MASK, &ctx->enc_key, walk.iv, 1);
 +	int ret;
 +	struct blkcipher_walk walk;
 +	struct p8_aes_cbc_ctx *ctx =
 +		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
 +	struct blkcipher_desc fallback_desc = {
 +		.tfm = ctx->fallback,
 +		.info = desc->info,
 +		.flags = desc->flags
 +	};
 +
 +	if (in_interrupt()) {
 +		ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
 +					       nbytes);
 +	} else {
++		preempt_disable();
 +		pagefault_disable();
 +		enable_kernel_altivec();
 +
 +		blkcipher_walk_init(&walk, dst, src, nbytes);
 +		ret = blkcipher_walk_virt(desc, &walk);
 +		while ((nbytes = walk.nbytes)) {
 +			aes_p8_cbc_encrypt(walk.src.virt.addr,
 +					   walk.dst.virt.addr,
 +					   nbytes & AES_BLOCK_MASK,
 +					   &ctx->enc_key, walk.iv, 1);
  			nbytes &= AES_BLOCK_SIZE - 1;
  			ret = blkcipher_walk_done(desc, &walk, nbytes);
 -	}
 +		}
  
 -        pagefault_enable();
 -	preempt_enable();
 -    }
 +		pagefault_enable();
++		preempt_enable();
 +	}
  
 -    return ret;
 +	return ret;
  }
  
  static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
 -    struct scatterlist *dst, struct scatterlist *src,
 -    unsigned int nbytes)
 +			      struct scatterlist *dst,
 +			      struct scatterlist *src, unsigned int nbytes)
  {
 -    int ret;
 -    struct blkcipher_walk walk;
 -    struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(
 -            crypto_blkcipher_tfm(desc->tfm));
 -    struct blkcipher_desc fallback_desc = {
 -        .tfm = ctx->fallback,
 -        .info = desc->info,
 -        .flags = desc->flags
 -    };
 -
 -    if (in_interrupt()) {
 -        ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes);
 -    } else {
 -	preempt_disable();
 -        pagefault_disable();
 -        enable_kernel_altivec();
 -
 -	blkcipher_walk_init(&walk, dst, src, nbytes);
 -        ret = blkcipher_walk_virt(desc, &walk);
 -        while ((nbytes = walk.nbytes)) {
 -			aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
 -				nbytes & AES_BLOCK_MASK, &ctx->dec_key, walk.iv, 0);
 +	int ret;
 +	struct blkcipher_walk walk;
 +	struct p8_aes_cbc_ctx *ctx =
 +		crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
 +	struct blkcipher_desc fallback_desc = {
 +		.tfm = ctx->fallback,
 +		.info = desc->info,
 +		.flags = desc->flags
 +	};
 +
 +	if (in_interrupt()) {
 +		ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src,
 +					       nbytes);
 +	} else {
++		preempt_disable();
 +		pagefault_disable();
 +		enable_kernel_altivec();
 +
 +		blkcipher_walk_init(&walk, dst, src, nbytes);
 +		ret = blkcipher_walk_virt(desc, &walk);
 +		while ((nbytes = walk.nbytes)) {
 +			aes_p8_cbc_encrypt(walk.src.virt.addr,
 +					   walk.dst.virt.addr,
 +					   nbytes & AES_BLOCK_MASK,
 +					   &ctx->dec_key, walk.iv, 0);
  			nbytes &= AES_BLOCK_SIZE - 1;
  			ret = blkcipher_walk_done(desc, &walk, nbytes);
  		}
  
 -        pagefault_enable();
 -	preempt_enable();
 -    }
 +		pagefault_enable();
++		preempt_enable();
 +	}
  
 -    return ret;
 +	return ret;
  }
  
  
diff --cc drivers/crypto/vmx/ghash.c
index 4c3a8f7e5059,f255ec4a04d4..000000000000
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@@ -109,92 -107,98 +109,100 @@@ static int p8_ghash_init(struct shash_d
  }
  
  static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
 -    unsigned int keylen)
 +			   unsigned int keylen)
  {
 -    struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm));
 -
 -    if (keylen != GHASH_KEY_LEN)
 -        return -EINVAL;
 -
 -    preempt_disable();
 -    pagefault_disable();
 -    enable_kernel_altivec();
 -    enable_kernel_fp();
 -    gcm_init_p8(ctx->htable, (const u64 *) key);
 -    pagefault_enable();
 -    preempt_enable();
 -    return crypto_shash_setkey(ctx->fallback, key, keylen);
 +	struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm));
 +
 +	if (keylen != GHASH_KEY_LEN)
 +		return -EINVAL;
 +
++	preempt_disable();
 +	pagefault_disable();
 +	enable_kernel_altivec();
 +	enable_kernel_fp();
 +	gcm_init_p8(ctx->htable, (const u64 *) key);
 +	pagefault_enable();
++	preempt_enable();
 +	return crypto_shash_setkey(ctx->fallback, key, keylen);
  }
  
  static int p8_ghash_update(struct shash_desc *desc,
 -        const u8 *src, unsigned int srclen)
 +			   const u8 *src, unsigned int srclen)
  {
 -    unsigned int len;
 -    struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
 -    struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
 -
 -    if (IN_INTERRUPT) {
 -        return crypto_shash_update(&dctx->fallback_desc, src, srclen);
 -    } else {
 -        if (dctx->bytes) {
 -            if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
 -                memcpy(dctx->buffer + dctx->bytes, src, srclen);
 -                dctx->bytes += srclen;
 -                return 0;
 -            }
 -            memcpy(dctx->buffer + dctx->bytes, src,
 -                    GHASH_DIGEST_SIZE - dctx->bytes);
 -	    preempt_disable();
 -            pagefault_disable();
 -            enable_kernel_altivec();
 -            enable_kernel_fp();
 -            gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
 -                    GHASH_DIGEST_SIZE);
 -            pagefault_enable();
 -	    preempt_enable();
 -            src += GHASH_DIGEST_SIZE - dctx->bytes;
 -            srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
 -            dctx->bytes = 0;
 -        }
 -        len = srclen & ~(GHASH_DIGEST_SIZE - 1);
 -        if (len) {
 -	    preempt_disable();
 -            pagefault_disable();
 -            enable_kernel_altivec();
 -            enable_kernel_fp();
 -            gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
 -            pagefault_enable();
 -	    preempt_enable();
 -            src += len;
 -            srclen -= len;
 -        }
 -        if (srclen) {
 -            memcpy(dctx->buffer, src, srclen);
 -            dctx->bytes = srclen;
 -        }
 -        return 0;
 -    }
 +	unsigned int len;
 +	struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
 +	struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
 +
 +	if (IN_INTERRUPT) {
 +		return crypto_shash_update(&dctx->fallback_desc, src,
 +					   srclen);
 +	} else {
 +		if (dctx->bytes) {
 +			if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
 +				memcpy(dctx->buffer + dctx->bytes, src,
 +				       srclen);
 +				dctx->bytes += srclen;
 +				return 0;
 +			}
 +			memcpy(dctx->buffer + dctx->bytes, src,
 +			       GHASH_DIGEST_SIZE - dctx->bytes);
++			preempt_disable();
 +			pagefault_disable();
 +			enable_kernel_altivec();
 +			enable_kernel_fp();
 +			gcm_ghash_p8(dctx->shash, ctx->htable,
 +				     dctx->buffer, GHASH_DIGEST_SIZE);
 +			pagefault_enable();
++			preempt_enable();
 +			src += GHASH_DIGEST_SIZE - dctx->bytes;
 +			srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
 +			dctx->bytes = 0;
 +		}
 +		len = srclen & ~(GHASH_DIGEST_SIZE - 1);
 +		if (len) {
++			preempt_disable();
 +			pagefault_disable();
 +			enable_kernel_altivec();
 +			enable_kernel_fp();
 +			gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
 +			pagefault_enable();
++			preempt_enable();
 +			src += len;
 +			srclen -= len;
 +		}
 +		if (srclen) {
 +			memcpy(dctx->buffer, src, srclen);
 +			dctx->bytes = srclen;
 +		}
 +		return 0;
 +	}
  }
  
  static int p8_ghash_final(struct shash_desc *desc, u8 *out)
  {
 -    int i;
 -    struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
 -    struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
 -
 -    if (IN_INTERRUPT) {
 -        return crypto_shash_final(&dctx->fallback_desc, out);
 -    } else {
 -        if (dctx->bytes) {
 -            for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
 -                dctx->buffer[i] = 0;
 -	    preempt_disable();
 -            pagefault_disable();
 -            enable_kernel_altivec();
 -            enable_kernel_fp();
 -            gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
 -                    GHASH_DIGEST_SIZE);
 -            pagefault_enable();
 -	    preempt_enable();
 -            dctx->bytes = 0;
 -        }
 -        memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
 -        return 0;
 -    }
 +	int i;
 +	struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
 +	struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
 +
 +	if (IN_INTERRUPT) {
 +		return crypto_shash_final(&dctx->fallback_desc, out);
 +	} else {
 +		if (dctx->bytes) {
 +			for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
 +				dctx->buffer[i] = 0;
++			preempt_disable();
 +			pagefault_disable();
 +			enable_kernel_altivec();
 +			enable_kernel_fp();
 +			gcm_ghash_p8(dctx->shash, ctx->htable,
 +				     dctx->buffer, GHASH_DIGEST_SIZE);
 +			pagefault_enable();
++			preempt_enable();
 +			dctx->bytes = 0;
 +		}
 +		memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
 +		return 0;
 +	}
  }
  
  struct shash_alg p8_ghash_alg = {




^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2022-11-28  1:30 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-07-17  4:46 linux-next: manual merge of the tip tree with the crypto tree Stephen Rothwell
2020-07-17  6:27 ` Uros Bizjak
2020-07-17  6:44   ` Herbert Xu
2020-07-17  6:56     ` Stephen Rothwell
2020-07-17  7:31     ` Uros Bizjak
2020-07-20  4:03       ` Stephen Rothwell
2020-07-20  6:13         ` Uros Bizjak
2020-07-20  6:29           ` Stephen Rothwell
2020-07-20 11:59       ` Herbert Xu
  -- strict thread matches above, loose matches on Subject: below --
2022-11-28  1:29 Stephen Rothwell
2020-07-21  4:28 Stephen Rothwell
2020-08-03 22:14 ` Stephen Rothwell
2017-11-08  2:21 Stephen Rothwell
2015-10-12  3:24 Stephen Rothwell
2015-10-12  6:18 ` Herbert Xu
2015-06-17  3:21 Michael Ellerman

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.