LKML Archive on lore.kernel.org
 help / color / Atom feed
From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: x86@kernel.org, Juergen Gross <jgross@suse.com>,
	Andi Kleen <ak@linux.intel.com>
Subject: [patch 3/3] x86/paravirt: Replace paravirt patch asm magic
Date: Wed, 24 Apr 2019 15:41:18 +0200
Message-ID: <20190424134223.690835713@linutronix.de> (raw)
In-Reply-To: <20190424134115.091452807@linutronix.de>

The magic macro DEF_NATIVE() in the paravirt patching code uses inline
assembly to generate a data table for patching in the native instructions.

While clever, this is falling apart with LTO and, even aside from LTO, the
construct is just working by chance according to GCC folks.

Aside from that, the tables are constant data and not some form of magic
text.

As these constructs are not subject to frequent changes it is not a
maintenance issue to convert them to regular data tables which are
initialized with hex bytes.

Create a new set of macros and data structures to store the instruction
sequences and convert the code over.

Reported-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/include/asm/paravirt_types.h |    4 
 arch/x86/kernel/paravirt_patch.c      |  144 +++++++++++++++++++---------------
 2 files changed, 82 insertions(+), 66 deletions(-)

--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -370,10 +370,6 @@ extern struct paravirt_patch_template pv
 /* Simple instruction patching code. */
 #define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"
 
-#define DEF_NATIVE(ops, name, code)					\
-	__visible extern const char start_##ops##_##name[], end_##ops##_##name[];	\
-	asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))
-
 unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
 unsigned paravirt_patch_default(u8 type, void *insnbuf,
 				unsigned long addr, unsigned len);
--- a/arch/x86/kernel/paravirt_patch.c
+++ b/arch/x86/kernel/paravirt_patch.c
@@ -4,103 +4,123 @@
 #include <asm/paravirt.h>
 #include <asm/asm-offsets.h>
 
-#ifdef CONFIG_X86_64
-# ifdef CONFIG_PARAVIRT_XXL
-DEF_NATIVE(irq, irq_disable, "cli");
-DEF_NATIVE(irq, irq_enable, "sti");
-DEF_NATIVE(irq, restore_fl, "pushq %rdi; popfq");
-DEF_NATIVE(irq, save_fl, "pushfq; popq %rax");
-DEF_NATIVE(mmu, read_cr2, "movq %cr2, %rax");
-DEF_NATIVE(mmu, read_cr3, "movq %cr3, %rax");
-DEF_NATIVE(mmu, write_cr3, "movq %rdi, %cr3");
-DEF_NATIVE(cpu, wbinvd, "wbinvd");
-DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq");
-DEF_NATIVE(cpu, swapgs, "swapgs");
-DEF_NATIVE(, mov64, "mov %rdi, %rax");
+#define PSTART(d, m)							\
+	patch_data_##d.m
 
-unsigned int paravirt_patch_ident_64(void *insnbuf, unsigned int len)
-{
-	return paravirt_patch_insns(insnbuf, len, start__mov64, end__mov64);
-}
-# endif /* CONFIG_PARAVIRT_XXL */
+#define PEND(d, m)							\
+	(PSTART(d, m) + sizeof(patch_data_##d.m))
 
-# ifdef CONFIG_PARAVIRT_SPINLOCKS
-DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)");
-DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
-# endif
+#define PATCH(d, m, ibuf, len)						\
+	paravirt_patch_insns(ibuf, len, PSTART(d, m), PEND(d, m))
 
-#else /* CONFIG_X86_64 */
+#define PATCH_CASE(ops, m, data, ibuf, len)				\
+	case PARAVIRT_PATCH(ops.m):					\
+		return PATCH(data, ops##_##m, ibuf, len)
 
-# ifdef CONFIG_PARAVIRT_XXL
-DEF_NATIVE(irq, irq_disable, "cli");
-DEF_NATIVE(irq, irq_enable, "sti");
-DEF_NATIVE(irq, restore_fl, "push %eax; popf");
-DEF_NATIVE(irq, save_fl, "pushf; pop %eax");
-DEF_NATIVE(cpu, iret, "iret");
-DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax");
-DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3");
-DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax");
+#ifdef CONFIG_PARAVIRT_XXL
+struct patch_xxl {
+	const unsigned char	irq_irq_disable[1];
+	const unsigned char	irq_irq_enable[1];
+	const unsigned char	irq_restore_fl[2];
+	const unsigned char	irq_save_fl[2];
+	const unsigned char	mmu_read_cr2[3];
+	const unsigned char	mmu_read_cr3[3];
+	const unsigned char	mmu_write_cr3[3];
+# ifdef CONFIG_X86_64
+	const unsigned char	cpu_wbinvd[2];
+	const unsigned char	cpu_usergs_sysret64[6];
+	const unsigned char	cpu_swapgs[3];
+	const unsigned char	mov64[3];
+# else
+	const unsigned char	cpu_iret[1];
+# endif
+};
+
+static const struct patch_xxl patch_data_xxl = {
+	.irq_irq_disable	= { 0xfa },		// cli
+	.irq_irq_enable		= { 0xfb },		// sti
+	.irq_save_fl		= { 0x9c, 0x58 },	// pushf; pop %[re]ax
+	.mmu_read_cr2		= { 0x0f, 0x20, 0xd0 },	// mov %cr2, %[re]ax
+	.mmu_read_cr3		= { 0x0f, 0x20, 0xd8 },	// mov %cr3, %[re]ax
+# ifdef CONFIG_X86_64
+	.irq_restore_fl		= { 0x57, 0x9d },	// push %rdi; popfq
+	.mmu_write_cr3		= { 0x0f, 0x22, 0xdf },	// mov %rdi, %cr3
+	.cpu_wbinvd		= { 0x0f, 0x09 },	// wbinvd
+	.cpu_usergs_sysret64	= { 0x0f, 0x01, 0xf8,
+				    0x48, 0x0f, 0x07 },	// swapgs; sysretq
+	.cpu_swapgs		= { 0x0f, 0x01, 0xf8 },	// swapgs
+	.mov64			= { 0x48, 0x89, 0xf8 },	// mov %rdi, %rax
+# else
+	.irq_restore_fl		= { 0x50, 0x9d },	// push %eax; popf
+	.mmu_write_cr3		= { 0x0f, 0x22, 0xd8 },	// mov %eax, %cr3
+	.cpu_iret		= { 0xcf },		// iret
+# endif
+};
 
 unsigned int paravirt_patch_ident_64(void *insnbuf, unsigned int len)
 {
-	/* arg in %edx:%eax, return in %edx:%eax */
+#ifdef CONFIG_X86_64
+	return PATCH(xxl, mov64, insnbuf, len);
+#endif
 	return 0;
 }
 # endif /* CONFIG_PARAVIRT_XXL */
 
-# ifdef CONFIG_PARAVIRT_SPINLOCKS
-DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)");
-DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
-# endif
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+struct patch_lock {
+	unsigned char queued_spin_unlock[3];
+	unsigned char vcpu_is_preempted[2];
+};
 
-#endif /* !CONFIG_X86_64 */
+static const struct patch_lock patch_data_lock = {
+	.vcpu_is_preempted	= { 0x31, 0xc0 },	// xor %eax, %eax
+
+# ifdef CONFIG_X86_64
+	.queued_spin_unlock	= { 0xc6, 0x07, 0x00 },	// movb $0, (%rdi)
+# else
+	.queued_spin_unlock	= { 0xc6, 0x00, 0x00 },	// movb $0, (%eax)
+# endif
+};
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
 
 unsigned int native_patch(u8 type, void *ibuf, unsigned long addr,
 			  unsigned int len)
 {
-#define PATCH_SITE(ops, x)					\
-	case PARAVIRT_PATCH(ops.x):				\
-		return paravirt_patch_insns(ibuf, len, start_##ops##_##x, end_##ops##_##x)
-
 	switch (type) {
+
 #ifdef CONFIG_PARAVIRT_XXL
-		PATCH_SITE(irq, restore_fl);
-		PATCH_SITE(irq, save_fl);
-		PATCH_SITE(irq, irq_enable);
-		PATCH_SITE(irq, irq_disable);
-
-		PATCH_SITE(mmu, read_cr2);
-		PATCH_SITE(mmu, read_cr3);
-		PATCH_SITE(mmu, write_cr3);
+	PATCH_CASE(irq, restore_fl, xxl, ibuf, len);
+	PATCH_CASE(irq, save_fl, xxl, ibuf, len);
+	PATCH_CASE(irq, irq_enable, xxl, ibuf, len);
+	PATCH_CASE(irq, irq_disable, xxl, ibuf, len);
+
+	PATCH_CASE(mmu, read_cr2, xxl, ibuf, len);
+	PATCH_CASE(mmu, read_cr3, xxl, ibuf, len);
+	PATCH_CASE(mmu, write_cr3, xxl, ibuf, len);
 
 # ifdef CONFIG_X86_64
-		PATCH_SITE(cpu, usergs_sysret64);
-		PATCH_SITE(cpu, swapgs);
-		PATCH_SITE(cpu, wbinvd);
+	PATCH_CASE(cpu, usergs_sysret64, xxl, ibuf, len);
+	PATCH_CASE(cpu, swapgs, xxl, ibuf, len);
+	PATCH_CASE(cpu, wbinvd, xxl, ibuf, len);
 # else
-		PATCH_SITE(cpu, iret);
+	PATCH_CASE(cpu, iret, xxl, ibuf, len);
 # endif
 #endif
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 	case PARAVIRT_PATCH(lock.queued_spin_unlock):
 		if (pv_is_native_spin_unlock())
-			return paravirt_patch_insns(ibuf, len,
-						    start_lock_queued_spin_unlock,
-						    end_lock_queued_spin_unlock);
+			return PATCH(lock, queued_spin_unlock, ibuf, len);
 		break;
 
 	case PARAVIRT_PATCH(lock.vcpu_is_preempted):
 		if (pv_is_native_vcpu_is_preempted())
-			return paravirt_patch_insns(ibuf, len,
-						    start_lock_vcpu_is_preempted,
-						    end_lock_vcpu_is_preempted);
+			return PATCH(lock, vcpu_is_preempted, ibuf, len);
 		break;
 #endif
-
 	default:
 		break;
 	}
-#undef PATCH_SITE
+
 	return paravirt_patch_default(type, ibuf, addr, len);
 }



  parent reply index

Thread overview: 28+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-04-24 13:41 [patch 0/3] x86/paravirt: Rework paravirt patching Thomas Gleixner
2019-04-24 13:41 ` [patch 1/3] x86/paravirt: Remove bogus extern declarations Thomas Gleixner
2019-04-25  7:31   ` [tip:x86/paravirt] " tip-bot for Thomas Gleixner
2019-05-24  7:58   ` tip-bot for Thomas Gleixner
2019-04-24 13:41 ` [patch 2/3] x86/paravirt: Unify 32/64 bit patch code Thomas Gleixner
2019-04-25  7:32   ` [tip:x86/paravirt] " tip-bot for Thomas Gleixner
2019-05-24  8:00   ` [tip:x86/paravirt] x86/paravirt: Unify the 32/64 bit paravirt patching code tip-bot for Thomas Gleixner
2019-04-24 13:41 ` Thomas Gleixner [this message]
2019-04-25  6:52   ` [patch 3/3] x86/paravirt: Replace paravirt patch asm magic Ingo Molnar
2019-04-25  7:22     ` Thomas Gleixner
2019-04-25  7:46       ` Juergen Gross
2019-04-25  8:10       ` [PATCH] x86/paravirt: Match paravirt patchlet field definition ordering to initialization ordering Ingo Molnar
2019-04-25  9:17         ` [PATCH] x86/paravirt: Detect oversized patching bugs as they happen and BUG_ON() to avoid later crashes Ingo Molnar
2019-04-25  9:21           ` Peter Zijlstra
2019-04-25  9:50             ` x86/paravirt: Detect over-sized patching bugs in paravirt_patch_call() Ingo Molnar
2019-04-25 10:22               ` Peter Zijlstra
2019-04-25 10:57                 ` Ingo Molnar
2019-04-25 11:30                   ` Juergen Gross
2019-04-25 12:30                     ` Juergen Gross
2019-04-25 11:40                   ` Peter Zijlstra
2019-04-25 12:30                     ` Peter Zijlstra
2019-05-24  7:59               ` [tip:x86/paravirt] " tip-bot for Ingo Molnar
2019-05-24  7:58           ` [tip:x86/paravirt] x86/paravirt: Detect over-sized patching bugs in paravirt_patch_insns() tip-bot for Ingo Molnar
2019-05-24  8:01         ` [tip:x86/paravirt] x86/paravirt: Match paravirt patchlet field definition ordering to initialization ordering tip-bot for Ingo Molnar
2019-04-25  8:08     ` [patch 3/3] x86/paravirt: Replace paravirt patch asm magic Peter Zijlstra
2019-04-25  8:19       ` Peter Zijlstra
2019-04-25  9:20       ` Ingo Molnar
2019-05-24  8:00   ` [tip:x86/paravirt] x86/paravirt: Replace the " tip-bot for Thomas Gleixner

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20190424134223.690835713@linutronix.de \
    --to=tglx@linutronix.de \
    --cc=ak@linux.intel.com \
    --cc=jgross@suse.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=x86@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

LKML Archive on lore.kernel.org

Archives are clonable:
	git clone --mirror https://lore.kernel.org/lkml/0 lkml/git/0.git
	git clone --mirror https://lore.kernel.org/lkml/1 lkml/git/1.git
	git clone --mirror https://lore.kernel.org/lkml/2 lkml/git/2.git
	git clone --mirror https://lore.kernel.org/lkml/3 lkml/git/3.git
	git clone --mirror https://lore.kernel.org/lkml/4 lkml/git/4.git
	git clone --mirror https://lore.kernel.org/lkml/5 lkml/git/5.git
	git clone --mirror https://lore.kernel.org/lkml/6 lkml/git/6.git
	git clone --mirror https://lore.kernel.org/lkml/7 lkml/git/7.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 lkml lkml/ https://lore.kernel.org/lkml \
		linux-kernel@vger.kernel.org linux-kernel@archiver.kernel.org
	public-inbox-index lkml


Newsgroup available over NNTP:
	nntp://nntp.lore.kernel.org/org.kernel.vger.linux-kernel


AGPL code for this site: git clone https://public-inbox.org/ public-inbox