From: Ankur Arora <ankur.a.arora@oracle.com>
To: linux-kernel@vger.kernel.org, x86@kernel.org
Cc: peterz@infradead.org, hpa@zytor.com, jpoimboe@redhat.com,
	namit@vmware.com, mhiramat@kernel.org, jgross@suse.com,
	bp@alien8.de, vkuznets@redhat.com, pbonzini@redhat.com,
	boris.ostrovsky@oracle.com, mihai.carabas@oracle.com,
	kvm@vger.kernel.org, xen-devel@lists.xenproject.org,
	virtualization@lists.linux-foundation.org,
	Ankur Arora <ankur.a.arora@oracle.com>
Subject: [RFC PATCH 01/26] x86/paravirt: Specify subsection in PVOP macros
Date: Tue,  7 Apr 2020 22:02:58 -0700	[thread overview]
Message-ID: <20200408050323.4237-2-ankur.a.arora@oracle.com> (raw)
In-Reply-To: <20200408050323.4237-1-ankur.a.arora@oracle.com>

Allow the PVOP macros to specify a subsection suffix so that
_paravirt_alt() can optionally place patch sites in a
.parainstructions.* subsection instead of in .parainstructions itself.
The default suffix, PV_SUFFIX, is the empty string, so existing users
continue to emit their sites into .parainstructions unchanged.
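
As an illustration (not part of this patch; later patches in this
series add PVRTOP runtime variants along these lines), a wrapper that
targets a ".runtime" subsection would just pass a non-empty suffix to
the new underscore-prefixed macros:

	/*
	 * Hypothetical sketch: PVRT_SUFFIX and PVRTOP_VCALL1 are not
	 * defined by this patch; they stand in for the runtime variants
	 * introduced later in the series.
	 */
	#define PVRT_SUFFIX	".runtime"

	#define PVRTOP_VCALL1(op, arg1)				\
		_PVOP_VCALL1(PVRT_SUFFIX, op, arg1)

Call sites generated via PVRTOP_VCALL1() would then be registered in
.parainstructions.runtime rather than .parainstructions.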

Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
---
 arch/x86/include/asm/paravirt_types.h | 158 +++++++++++++++++---------
 1 file changed, 102 insertions(+), 56 deletions(-)
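
For reference, a rough sketch of what each patched call site expands to
on x86-64 (illustrative, not verbatim compiler output; assumes the
64-bit expansions of _ASM_ALIGN to ".balign 8" and _ASM_PTR to ".quad",
and elides operand details):

	771:	call	*pv_ops+<off>		# PARAVIRT_CALL
	772:
		.pushsection .parainstructions, "a"	# or .parainstructions<sec>
		.balign	8
		.quad	771b			# patch site address
		.byte	<type>			# paravirt_typenum
		.byte	772b-771b		# call site length
		.short	<clobbers>		# paravirt_clobber
		.popsection

The only change this patch makes to that expansion is the section name
following .pushsection.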

diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 732f62e04ddb..37e8f27a3b9d 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -337,6 +337,9 @@ struct paravirt_patch_template {
 extern struct pv_info pv_info;
 extern struct paravirt_patch_template pv_ops;
 
+/* Sub-section for .parainstructions */
+#define PV_SUFFIX ""
+
 #define PARAVIRT_PATCH(x)					\
 	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
 
@@ -350,9 +353,9 @@ extern struct paravirt_patch_template pv_ops;
  * Generate some code, and mark it as patchable by the
  * apply_paravirt() alternate instruction patcher.
  */
-#define _paravirt_alt(insn_string, type, clobber)	\
+#define _paravirt_alt(sec, insn_string, type, clobber)	\
 	"771:\n\t" insn_string "\n" "772:\n"		\
-	".pushsection .parainstructions,\"a\"\n"	\
+	".pushsection .parainstructions" sec ",\"a\"\n"	\
 	_ASM_ALIGN "\n"					\
 	_ASM_PTR " 771b\n"				\
 	"  .byte " type "\n"				\
@@ -361,8 +364,9 @@ extern struct paravirt_patch_template pv_ops;
 	".popsection\n"
 
 /* Generate patchable code, with the default asm parameters. */
-#define paravirt_alt(insn_string)					\
-	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
+#define paravirt_alt(sec, insn_string)					\
+	_paravirt_alt(sec, insn_string, "%c[paravirt_typenum]",		\
+		      "%c[paravirt_clobber]")
 
 /* Simple instruction patching code. */
 #define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"
@@ -414,7 +418,7 @@ int paravirt_disable_iospace(void);
  * unfortunately, are quite a bit (r8 - r11)
  *
  * The call instruction itself is marked by placing its start address
- * and size into the .parainstructions section, so that
+ * and size into the .parainstructions* sections, so that
  * apply_paravirt() in arch/i386/kernel/alternative.c can do the
  * appropriate patching under the control of the backend pv_init_ops
  * implementation.
@@ -512,7 +516,7 @@ int paravirt_disable_iospace(void);
 	})
 
 
-#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,		\
+#define ____PVOP_CALL(sec, rettype, op, clbr, call_clbr, extra_clbr,	\
 		      pre, post, ...)					\
 	({								\
 		rettype __ret;						\
@@ -522,7 +526,7 @@ int paravirt_disable_iospace(void);
 		/* since this condition will never hold */		\
 		if (sizeof(rettype) > sizeof(unsigned long)) {		\
 			asm volatile(pre				\
-				     paravirt_alt(PARAVIRT_CALL)	\
+				     paravirt_alt(sec, PARAVIRT_CALL)	\
 				     post				\
 				     : call_clbr, ASM_CALL_CONSTRAINT	\
 				     : paravirt_type(op),		\
@@ -532,7 +536,7 @@ int paravirt_disable_iospace(void);
 			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
 		} else {						\
 			asm volatile(pre				\
-				     paravirt_alt(PARAVIRT_CALL)	\
+				     paravirt_alt(sec, PARAVIRT_CALL)	\
 				     post				\
 				     : call_clbr, ASM_CALL_CONSTRAINT	\
 				     : paravirt_type(op),		\
@@ -544,22 +548,22 @@ int paravirt_disable_iospace(void);
 		__ret;							\
 	})
 
-#define __PVOP_CALL(rettype, op, pre, post, ...)			\
-	____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,	\
+#define __PVOP_CALL(sec, rettype, op, pre, post, ...)			\
+	____PVOP_CALL(sec, rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,	\
 		      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
 
-#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)			\
-	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
+#define __PVOP_CALLEESAVE(sec, rettype, op, pre, post, ...)		\
+	____PVOP_CALL(sec, rettype, op.func, CLBR_RET_REG,		\
 		      PVOP_CALLEE_CLOBBERS, ,				\
 		      pre, post, ##__VA_ARGS__)
 
 
-#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...)	\
+#define ____PVOP_VCALL(sec, op, clbr, call_clbr, extra_clbr, pre, post, ...)	\
 	({								\
 		PVOP_VCALL_ARGS;					\
 		PVOP_TEST_NULL(op);					\
 		asm volatile(pre					\
-			     paravirt_alt(PARAVIRT_CALL)		\
+			     paravirt_alt(sec, PARAVIRT_CALL)		\
 			     post					\
 			     : call_clbr, ASM_CALL_CONSTRAINT		\
 			     : paravirt_type(op),			\
@@ -568,85 +572,127 @@ int paravirt_disable_iospace(void);
 			     : "memory", "cc" extra_clbr);		\
 	})
 
-#define __PVOP_VCALL(op, pre, post, ...)				\
-	____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,		\
+#define __PVOP_VCALL(sec, op, pre, post, ...)				\
+	____PVOP_VCALL(sec, op, CLBR_ANY, PVOP_VCALL_CLOBBERS,		\
 		       VEXTRA_CLOBBERS,					\
 		       pre, post, ##__VA_ARGS__)
 
-#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
-	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
+#define __PVOP_VCALLEESAVE(sec, op, pre, post, ...)			\
+	____PVOP_VCALL(sec, op.func, CLBR_RET_REG,			\
 		      PVOP_VCALLEE_CLOBBERS, ,				\
 		      pre, post, ##__VA_ARGS__)
 
 
 
-#define PVOP_CALL0(rettype, op)						\
-	__PVOP_CALL(rettype, op, "", "")
-#define PVOP_VCALL0(op)							\
-	__PVOP_VCALL(op, "", "")
+#define _PVOP_CALL0(sec, rettype, op)					\
+	__PVOP_CALL(sec, rettype, op, "", "")
+#define _PVOP_VCALL0(sec, op)						\
+	__PVOP_VCALL(sec, op, "", "")
 
-#define PVOP_CALLEE0(rettype, op)					\
-	__PVOP_CALLEESAVE(rettype, op, "", "")
-#define PVOP_VCALLEE0(op)						\
-	__PVOP_VCALLEESAVE(op, "", "")
+#define _PVOP_CALLEE0(sec, rettype, op)					\
+	__PVOP_CALLEESAVE(sec, rettype, op, "", "")
+#define _PVOP_VCALLEE0(sec, op)						\
+	__PVOP_VCALLEESAVE(sec, op, "", "")
 
 
-#define PVOP_CALL1(rettype, op, arg1)					\
-	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
-#define PVOP_VCALL1(op, arg1)						\
-	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
+#define _PVOP_CALL1(sec, rettype, op, arg1)				\
+	__PVOP_CALL(sec, rettype, op, "", "", PVOP_CALL_ARG1(arg1))
+#define _PVOP_VCALL1(sec, op, arg1)					\
+	__PVOP_VCALL(sec, op, "", "", PVOP_CALL_ARG1(arg1))
 
-#define PVOP_CALLEE1(rettype, op, arg1)					\
-	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
-#define PVOP_VCALLEE1(op, arg1)						\
-	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
+#define _PVOP_CALLEE1(sec, rettype, op, arg1)				\
+	__PVOP_CALLEESAVE(sec, rettype, op, "", "", PVOP_CALL_ARG1(arg1))
+#define _PVOP_VCALLEE1(sec, op, arg1)					\
+	__PVOP_VCALLEESAVE(sec, op, "", "", PVOP_CALL_ARG1(arg1))
 
-
-#define PVOP_CALL2(rettype, op, arg1, arg2)				\
-	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
+#define _PVOP_CALL2(sec, rettype, op, arg1, arg2)			\
+	__PVOP_CALL(sec, rettype, op, "", "", PVOP_CALL_ARG1(arg1),	\
 		    PVOP_CALL_ARG2(arg2))
-#define PVOP_VCALL2(op, arg1, arg2)					\
-	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
+#define _PVOP_VCALL2(sec, op, arg1, arg2)				\
+	__PVOP_VCALL(sec, op, "", "", PVOP_CALL_ARG1(arg1),		\
 		     PVOP_CALL_ARG2(arg2))
 
-#define PVOP_CALLEE2(rettype, op, arg1, arg2)				\
-	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),	\
+#define _PVOP_CALLEE2(sec, rettype, op, arg1, arg2)			\
+	__PVOP_CALLEESAVE(sec, rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
 			  PVOP_CALL_ARG2(arg2))
-#define PVOP_VCALLEE2(op, arg1, arg2)					\
-	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),		\
+#define _PVOP_VCALLEE2(sec, op, arg1, arg2)				\
+	__PVOP_VCALLEESAVE(sec, op, "", "", PVOP_CALL_ARG1(arg1),	\
 			   PVOP_CALL_ARG2(arg2))
 
 
-#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
-	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
+#define _PVOP_CALL3(sec, rettype, op, arg1, arg2, arg3)			\
+	__PVOP_CALL(sec, rettype, op, "", "", PVOP_CALL_ARG1(arg1),	\
 		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
-#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
-	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
+#define _PVOP_VCALL3(sec, op, arg1, arg2, arg3)				\
+	__PVOP_VCALL(sec, op, "", "", PVOP_CALL_ARG1(arg1),		\
 		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
 
 /* This is the only difference in x86_64. We can make it much simpler */
 #ifdef CONFIG_X86_32
-#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
-	__PVOP_CALL(rettype, op,					\
+#define _PVOP_CALL4(sec, rettype, op, arg1, arg2, arg3, arg4)		\
+	__PVOP_CALL(sec, rettype, op,					\
 		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
 		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
 		    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
-#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
-	__PVOP_VCALL(op,						\
+#define _PVOP_VCALL4(sec, op, arg1, arg2, arg3, arg4)			\
+	__PVOP_VCALL(sec, op,						\
 		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
 		    "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
 		    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
 #else
-#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
-	__PVOP_CALL(rettype, op, "", "",				\
+#define _PVOP_CALL4(sec, rettype, op, arg1, arg2, arg3, arg4)		\
+	__PVOP_CALL(sec, rettype, op, "", "",				\
 		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
 		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
-#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
-	__PVOP_VCALL(op, "", "",					\
+#define _PVOP_VCALL4(sec, op, arg1, arg2, arg3, arg4)			\
+	__PVOP_VCALL(sec, op, "", "",					\
 		     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
 		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
 #endif
 
+/*
+ * PVOP macros for .parainstructions
+ */
+#define PVOP_CALL0(rettype, op)						\
+	_PVOP_CALL0(PV_SUFFIX, rettype, op)
+#define PVOP_VCALL0(op)							\
+	_PVOP_VCALL0(PV_SUFFIX, op)
+
+#define PVOP_CALLEE0(rettype, op)					\
+	_PVOP_CALLEE0(PV_SUFFIX, rettype, op)
+#define PVOP_VCALLEE0(op)						\
+	_PVOP_VCALLEE0(PV_SUFFIX, op)
+
+#define PVOP_CALL1(rettype, op, arg1)					\
+	_PVOP_CALL1(PV_SUFFIX, rettype, op, arg1)
+#define PVOP_VCALL1(op, arg1)						\
+	_PVOP_VCALL1(PV_SUFFIX, op, arg1)
+
+#define PVOP_CALLEE1(rettype, op, arg1)					\
+	_PVOP_CALLEE1(PV_SUFFIX, rettype, op, arg1)
+#define PVOP_VCALLEE1(op, arg1)						\
+	_PVOP_VCALLEE1(PV_SUFFIX, op, arg1)
+
+#define PVOP_CALL2(rettype, op, arg1, arg2)				\
+	_PVOP_CALL2(PV_SUFFIX, rettype, op, arg1, arg2)
+#define PVOP_VCALL2(op, arg1, arg2)					\
+	_PVOP_VCALL2(PV_SUFFIX, op, arg1, arg2)
+
+#define PVOP_CALLEE2(rettype, op, arg1, arg2)				\
+	_PVOP_CALLEE2(PV_SUFFIX, rettype, op, arg1, arg2)
+#define PVOP_VCALLEE2(op, arg1, arg2)					\
+	_PVOP_VCALLEE2(PV_SUFFIX, op, arg1, arg2)
+
+#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
+	_PVOP_CALL3(PV_SUFFIX, rettype, op, arg1, arg2, arg3)
+#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
+	_PVOP_VCALL3(PV_SUFFIX, op, arg1, arg2, arg3)
+
+#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
+	_PVOP_CALL4(PV_SUFFIX, rettype, op, arg1, arg2, arg3, arg4)
+#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
+	_PVOP_VCALL4(PV_SUFFIX, op, arg1, arg2, arg3, arg4)
+
 /* Lazy mode for batching updates / context switch */
 enum paravirt_lazy_mode {
 	PARAVIRT_LAZY_NONE,
@@ -667,7 +713,7 @@ u64 _paravirt_ident_64(u64);
 
 #define paravirt_nop	((void *)_paravirt_nop)
 
-/* These all sit in the .parainstructions section to tell us what to patch. */
+/* These all sit in .parainstructions* sections to tell us what to patch. */
 struct paravirt_patch_site {
 	u8 *instr;		/* original instructions */
 	u8 type;		/* type of this instruction */
-- 
2.20.1


Thread overview: 93+ messages in thread
2020-04-08  5:02 [RFC PATCH 00/26] Runtime paravirt patching Ankur Arora
2020-04-08  5:02 ` [RFC PATCH 01/26] x86/paravirt: Specify subsection in PVOP macros Ankur Arora [this message]
2020-04-08  5:02 ` [RFC PATCH 02/26] x86/paravirt: Allow paravirt patching post-init Ankur Arora
2020-04-08  5:03 ` [RFC PATCH 03/26] x86/paravirt: PVRTOP macros for PARAVIRT_RUNTIME Ankur Arora
2020-04-08  5:03 ` [RFC PATCH 04/26] x86/alternatives: Refactor alternatives_smp_module* Ankur Arora
2020-04-08  5:03 ` [RFC PATCH 05/26] x86/alternatives: Rename alternatives_smp*, smp_alt_module Ankur Arora
2020-04-08  5:03 ` [RFC PATCH 06/26] x86/alternatives: Remove stale symbols Ankur Arora
2020-04-08  5:03 ` [RFC PATCH 07/26] x86/paravirt: Persist .parainstructions.runtime Ankur Arora
2020-04-08  5:03 ` [RFC PATCH 08/26] x86/paravirt: Stash native pv-ops Ankur Arora
2020-04-08  5:03 ` [RFC PATCH 09/26] x86/paravirt: Add runtime_patch() Ankur Arora
2020-04-08 11:05   ` Peter Zijlstra
2020-04-08  5:03 ` [RFC PATCH 10/26] x86/paravirt: Add primitives to stage pv-ops Ankur Arora
2020-04-08  5:03 ` [RFC PATCH 11/26] x86/alternatives: Remove return value of text_poke*() Ankur Arora
2020-04-08  5:03 ` [RFC PATCH 12/26] x86/alternatives: Use __get_unlocked_pte() in text_poke() Ankur Arora
2020-04-08  5:03 ` [RFC PATCH 13/26] x86/alternatives: Split __text_poke() Ankur Arora
2020-04-08  5:03 ` [RFC PATCH 14/26] x86/alternatives: Handle native insns in text_poke_loc*() Ankur Arora
2020-04-08 11:11   ` Peter Zijlstra
2020-04-08 11:17   ` Peter Zijlstra
2020-04-08  5:03 ` [RFC PATCH 15/26] x86/alternatives: Non-emulated text poking Ankur Arora
2020-04-08 11:13   ` Peter Zijlstra
2020-04-08 11:23   ` Peter Zijlstra
2020-04-08  5:03 ` [RFC PATCH 16/26] x86/alternatives: Add paravirt patching at runtime Ankur Arora
2020-04-08  5:03 ` [RFC PATCH 17/26] x86/alternatives: Add patching logic in text_poke_site() Ankur Arora
2020-04-08  5:03 ` [RFC PATCH 18/26] x86/alternatives: Handle BP in non-emulated text poking Ankur Arora
2020-04-08  5:03 ` [RFC PATCH 19/26] x86/alternatives: NMI safe runtime patching Ankur Arora
2020-04-08 11:36   ` Peter Zijlstra
2020-04-08  5:03 ` [RFC PATCH 20/26] x86/paravirt: Enable pv-spinlocks in runtime_patch() Ankur Arora
2020-04-08  5:03 ` [RFC PATCH 21/26] x86/alternatives: Paravirt runtime selftest Ankur Arora
2020-04-08  5:03 ` [RFC PATCH 22/26] kvm/paravirt: Encapsulate KVM pv switching logic Ankur Arora
2020-04-08  5:03 ` [RFC PATCH 23/26] x86/kvm: Add worker to trigger runtime patching Ankur Arora
2020-04-08  5:03 ` [RFC PATCH 24/26] x86/kvm: Support dynamic CPUID hints Ankur Arora
2020-04-08  5:03 ` [RFC PATCH 25/26] x86/kvm: Guest support for dynamic hints Ankur Arora
2020-04-08  5:03 ` [RFC PATCH 26/26] x86/kvm: Add hint change notifier for KVM_HINT_REALTIME Ankur Arora
2020-04-08 12:08 ` [RFC PATCH 00/26] Runtime paravirt patching Peter Zijlstra
2020-04-08 13:33   ` Jürgen Groß
2020-04-08 14:49     ` Peter Zijlstra
2020-04-10  9:18   ` Ankur Arora
2020-04-08 12:28 ` Jürgen Groß
2020-04-10  7:56   ` Ankur Arora
2020-04-10  9:32   ` Ankur Arora
2020-04-08 14:12 ` Thomas Gleixner
2020-04-10  9:55   ` Ankur Arora
