From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Xen-devel <xen-devel@lists.xenproject.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: [PATCH v2 44/70] x86/pmu: CFI hardening
Date: Mon, 14 Feb 2022 12:51:01 +0000
Message-ID: <20220214125127.17985-45-andrew.cooper3@citrix.com>
In-Reply-To: <20220214125127.17985-1-andrew.cooper3@citrix.com>

Control Flow Integrity schemes use toolchain and optionally hardware support
to help protect against call/jump/return oriented programming attacks.

Use cf_check to annotate function pointer targets for the toolchain.
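
For illustration, a minimal sketch of what the annotation amounts to (this is
not Xen's actual compiler.h; the __CET__ guard and the example names below are
hypothetical, and assume a toolchain providing the cf_check function attribute
used together with -fcf-protection=branch and -mmanual-endbr):

    #ifdef __CET__                  /* defined by the compiler when CET is enabled */
    # define cf_check __attribute__((__cf_check__))
    #else
    # define cf_check               /* no-op on toolchains without IBT support */
    #endif

    struct vcpu;

    /* Reached via a function pointer: gets an ENDBR64 landing pad emitted. */
    static int cf_check example_vpmu_initialise(struct vcpu *v)
    {
        return 0;
    }

    /* Only ever called directly: no landing pad, so not an indirect-call target. */
    static int example_direct_helper(struct vcpu *v)
    {
        return 0;
    }

    /* Indirect calls through this pointer must land on an annotated function. */
    static int (*init_hook)(struct vcpu *) = example_vpmu_initialise;

With CET-IBT active in hardware, an indirect call or jump that lands on
example_direct_helper() (or any other unannotated entry point) raises #CP,
because its first instruction is not a valid ENDBR64 landing pad.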

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
---
 xen/arch/x86/cpu/vpmu_amd.c             | 16 ++++++++--------
 xen/arch/x86/cpu/vpmu_intel.c           | 16 ++++++++--------
 xen/arch/x86/oprofile/op_model_athlon.c | 16 ++++++++--------
 xen/arch/x86/oprofile/op_model_p4.c     | 14 +++++++-------
 xen/arch/x86/oprofile/op_model_ppro.c   | 26 ++++++++++++++------------
 5 files changed, 45 insertions(+), 43 deletions(-)

diff --git a/xen/arch/x86/cpu/vpmu_amd.c b/xen/arch/x86/cpu/vpmu_amd.c
index 25ad4ecf48a4..5963ce90150a 100644
--- a/xen/arch/x86/cpu/vpmu_amd.c
+++ b/xen/arch/x86/cpu/vpmu_amd.c
@@ -186,7 +186,7 @@ static void amd_vpmu_unset_msr_bitmap(struct vcpu *v)
     msr_bitmap_off(vpmu);
 }
 
-static int amd_vpmu_do_interrupt(struct cpu_user_regs *regs)
+static int cf_check amd_vpmu_do_interrupt(struct cpu_user_regs *regs)
 {
     return 1;
 }
@@ -206,7 +206,7 @@ static inline void context_load(struct vcpu *v)
     }
 }
 
-static int amd_vpmu_load(struct vcpu *v, bool_t from_guest)
+static int cf_check amd_vpmu_load(struct vcpu *v, bool from_guest)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     struct xen_pmu_amd_ctxt *ctxt;
@@ -280,7 +280,7 @@ static inline void context_save(struct vcpu *v)
         rdmsrl(counters[i], counter_regs[i]);
 }
 
-static int amd_vpmu_save(struct vcpu *v,  bool_t to_guest)
+static int cf_check amd_vpmu_save(struct vcpu *v,  bool to_guest)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     unsigned int i;
@@ -348,7 +348,7 @@ static void context_update(unsigned int msr, u64 msr_content)
     }
 }
 
-static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
+static int cf_check amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
 {
     struct vcpu *v = current;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
@@ -404,7 +404,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
     return 0;
 }
 
-static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
+static int cf_check amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
 {
     struct vcpu *v = current;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
@@ -422,7 +422,7 @@ static int amd_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
     return 0;
 }
 
-static void amd_vpmu_destroy(struct vcpu *v)
+static void cf_check amd_vpmu_destroy(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
@@ -440,7 +440,7 @@ static void amd_vpmu_destroy(struct vcpu *v)
 }
 
 /* VPMU part of the 'q' keyhandler */
-static void amd_vpmu_dump(const struct vcpu *v)
+static void cf_check amd_vpmu_dump(const struct vcpu *v)
 {
     const struct vpmu_struct *vpmu = vcpu_vpmu(v);
     const struct xen_pmu_amd_ctxt *ctxt = vpmu->context;
@@ -480,7 +480,7 @@ static void amd_vpmu_dump(const struct vcpu *v)
     }
 }
 
-static int svm_vpmu_initialise(struct vcpu *v)
+static int cf_check svm_vpmu_initialise(struct vcpu *v)
 {
     struct xen_pmu_amd_ctxt *ctxt;
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
diff --git a/xen/arch/x86/cpu/vpmu_intel.c b/xen/arch/x86/cpu/vpmu_intel.c
index 22dd4469d920..48b81ab6f018 100644
--- a/xen/arch/x86/cpu/vpmu_intel.c
+++ b/xen/arch/x86/cpu/vpmu_intel.c
@@ -288,7 +288,7 @@ static inline void __core2_vpmu_save(struct vcpu *v)
         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status);
 }
 
-static int core2_vpmu_save(struct vcpu *v, bool_t to_guest)
+static int cf_check core2_vpmu_save(struct vcpu *v, bool to_guest)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
@@ -407,7 +407,7 @@ static int core2_vpmu_verify(struct vcpu *v)
     return 0;
 }
 
-static int core2_vpmu_load(struct vcpu *v, bool_t from_guest)
+static int cf_check core2_vpmu_load(struct vcpu *v, bool from_guest)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
@@ -522,7 +522,7 @@ static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
     return 1;
 }
 
-static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
+static int cf_check core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
 {
     int i, tmp;
     int type = -1, index = -1;
@@ -690,7 +690,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content)
     return 0;
 }
 
-static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
+static int cf_check core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
 {
     int type = -1, index = -1;
     struct vcpu *v = current;
@@ -730,7 +730,7 @@ static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
 }
 
 /* Dump vpmu info on console, called in the context of keyhandler 'q'. */
-static void core2_vpmu_dump(const struct vcpu *v)
+static void cf_check core2_vpmu_dump(const struct vcpu *v)
 {
     const struct vpmu_struct *vpmu = vcpu_vpmu(v);
     unsigned int i;
@@ -775,7 +775,7 @@ static void core2_vpmu_dump(const struct vcpu *v)
     }
 }
 
-static int core2_vpmu_do_interrupt(struct cpu_user_regs *regs)
+static int cf_check core2_vpmu_do_interrupt(struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
     u64 msr_content;
@@ -802,7 +802,7 @@ static int core2_vpmu_do_interrupt(struct cpu_user_regs *regs)
     return 1;
 }
 
-static void core2_vpmu_destroy(struct vcpu *v)
+static void cf_check core2_vpmu_destroy(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
@@ -816,7 +816,7 @@ static void core2_vpmu_destroy(struct vcpu *v)
     vpmu_clear(vpmu);
 }
 
-static int vmx_vpmu_initialise(struct vcpu *v)
+static int cf_check vmx_vpmu_initialise(struct vcpu *v)
 {
     struct vpmu_struct *vpmu = vcpu_vpmu(v);
     u64 msr_content;
diff --git a/xen/arch/x86/oprofile/op_model_athlon.c b/xen/arch/x86/oprofile/op_model_athlon.c
index 2177f02946e2..7bc5853a6c23 100644
--- a/xen/arch/x86/oprofile/op_model_athlon.c
+++ b/xen/arch/x86/oprofile/op_model_athlon.c
@@ -164,7 +164,7 @@ static inline u64 op_amd_randomize_ibs_op(u64 val)
     return val;
 }
 
-static void athlon_fill_in_addresses(struct op_msrs * const msrs)
+static void cf_check athlon_fill_in_addresses(struct op_msrs * const msrs)
 {
 	msrs->counters[0].addr = MSR_K7_PERFCTR0;
 	msrs->counters[1].addr = MSR_K7_PERFCTR1;
@@ -177,7 +177,7 @@ static void athlon_fill_in_addresses(struct op_msrs * const msrs)
 	msrs->controls[3].addr = MSR_K7_EVNTSEL3;
 }
 
-static void fam15h_fill_in_addresses(struct op_msrs * const msrs)
+static void cf_check fam15h_fill_in_addresses(struct op_msrs * const msrs)
 {
 	msrs->counters[0].addr = MSR_AMD_FAM15H_PERFCTR0;
 	msrs->counters[1].addr = MSR_AMD_FAM15H_PERFCTR1;
@@ -194,7 +194,7 @@ static void fam15h_fill_in_addresses(struct op_msrs * const msrs)
 	msrs->controls[5].addr = MSR_AMD_FAM15H_EVNTSEL5;
 }
 
-static void athlon_setup_ctrs(struct op_msrs const * const msrs)
+static void cf_check athlon_setup_ctrs(struct op_msrs const * const msrs)
 {
 	uint64_t msr_content;
 	int i;
@@ -308,9 +308,9 @@ static inline int handle_ibs(int mode, struct cpu_user_regs const * const regs)
     return 1;
 }
 
-static int athlon_check_ctrs(unsigned int const cpu,
-			     struct op_msrs const * const msrs,
-			     struct cpu_user_regs const * const regs)
+static int cf_check athlon_check_ctrs(
+	unsigned int const cpu, struct op_msrs const * const msrs,
+	struct cpu_user_regs const * const regs)
 
 {
 	uint64_t msr_content;
@@ -386,7 +386,7 @@ static inline void start_ibs(void)
 	}
 }
  
-static void athlon_start(struct op_msrs const * const msrs)
+static void cf_check athlon_start(struct op_msrs const * const msrs)
 {
 	uint64_t msr_content;
 	int i;
@@ -415,7 +415,7 @@ static void stop_ibs(void)
 		wrmsrl(MSR_AMD64_IBSOPCTL, 0);
 }
 
-static void athlon_stop(struct op_msrs const * const msrs)
+static void cf_check athlon_stop(struct op_msrs const * const msrs)
 {
 	uint64_t msr_content;
 	int i;
diff --git a/xen/arch/x86/oprofile/op_model_p4.c b/xen/arch/x86/oprofile/op_model_p4.c
index b08ba53cbd39..d047258644db 100644
--- a/xen/arch/x86/oprofile/op_model_p4.c
+++ b/xen/arch/x86/oprofile/op_model_p4.c
@@ -390,7 +390,7 @@ static unsigned int get_stagger(void)
 static unsigned long reset_value[NUM_COUNTERS_NON_HT];
 
 
-static void p4_fill_in_addresses(struct op_msrs * const msrs)
+static void cf_check p4_fill_in_addresses(struct op_msrs * const msrs)
 {
 	unsigned int i;
 	unsigned int addr, stag;
@@ -530,7 +530,7 @@ static void pmc_setup_one_p4_counter(unsigned int ctr)
 }
 
 
-static void p4_setup_ctrs(struct op_msrs const * const msrs)
+static void cf_check p4_setup_ctrs(struct op_msrs const * const msrs)
 {
 	unsigned int i;
 	uint64_t msr_content;
@@ -609,9 +609,9 @@ static void p4_setup_ctrs(struct op_msrs const * const msrs)
 	}
 }
 
-static int p4_check_ctrs(unsigned int const cpu,
-                         struct op_msrs const * const msrs,
-                         struct cpu_user_regs const * const regs)
+static int cf_check p4_check_ctrs(
+	unsigned int const cpu, struct op_msrs const * const msrs,
+	struct cpu_user_regs const * const regs)
 {
 	unsigned long ctr, stag, real;
 	uint64_t msr_content;
@@ -665,7 +665,7 @@ static int p4_check_ctrs(unsigned int const cpu,
 }
 
 
-static void p4_start(struct op_msrs const * const msrs)
+static void cf_check p4_start(struct op_msrs const * const msrs)
 {
 	unsigned int stag;
 	uint64_t msr_content;
@@ -683,7 +683,7 @@ static void p4_start(struct op_msrs const * const msrs)
 }
 
 
-static void p4_stop(struct op_msrs const * const msrs)
+static void cf_check p4_stop(struct op_msrs const * const msrs)
 {
 	unsigned int stag;
 	uint64_t msr_content;
diff --git a/xen/arch/x86/oprofile/op_model_ppro.c b/xen/arch/x86/oprofile/op_model_ppro.c
index 72c504a10216..8d7e13ea8777 100644
--- a/xen/arch/x86/oprofile/op_model_ppro.c
+++ b/xen/arch/x86/oprofile/op_model_ppro.c
@@ -63,7 +63,7 @@ static int counter_width = 32;
 static unsigned long reset_value[OP_MAX_COUNTER];
 int ppro_has_global_ctrl = 0;
 
-static void ppro_fill_in_addresses(struct op_msrs * const msrs)
+static void cf_check ppro_fill_in_addresses(struct op_msrs * const msrs)
 {
 	int i;
 
@@ -74,7 +74,7 @@ static void ppro_fill_in_addresses(struct op_msrs * const msrs)
 }
 
 
-static void ppro_setup_ctrs(struct op_msrs const * const msrs)
+static void cf_check ppro_setup_ctrs(struct op_msrs const * const msrs)
 {
 	uint64_t msr_content;
 	int i;
@@ -128,9 +128,9 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
 	}
 }
 
-static int ppro_check_ctrs(unsigned int const cpu,
-                           struct op_msrs const * const msrs,
-                           struct cpu_user_regs const * const regs)
+static int cf_check ppro_check_ctrs(
+	unsigned int const cpu, struct op_msrs const * const msrs,
+	struct cpu_user_regs const * const regs)
 {
 	u64 val;
 	int i;
@@ -170,7 +170,7 @@ static int ppro_check_ctrs(unsigned int const cpu,
 }
 
 
-static void ppro_start(struct op_msrs const * const msrs)
+static void cf_check ppro_start(struct op_msrs const * const msrs)
 {
 	uint64_t msr_content;
 	int i;
@@ -190,7 +190,7 @@ static void ppro_start(struct op_msrs const * const msrs)
 }
 
 
-static void ppro_stop(struct op_msrs const * const msrs)
+static void cf_check ppro_stop(struct op_msrs const * const msrs)
 {
 	uint64_t msr_content;
 	int i;
@@ -206,7 +206,7 @@ static void ppro_stop(struct op_msrs const * const msrs)
         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0ULL);
 }
 
-static int ppro_is_arch_pmu_msr(u64 msr_index, int *type, int *index)
+static int cf_check ppro_is_arch_pmu_msr(u64 msr_index, int *type, int *index)
 {
 	if ( (msr_index >= MSR_IA32_PERFCTR0) &&
             (msr_index < (MSR_IA32_PERFCTR0 + num_counters)) )
@@ -226,7 +226,7 @@ static int ppro_is_arch_pmu_msr(u64 msr_index, int *type, int *index)
         return 0;
 }
 
-static int ppro_allocate_msr(struct vcpu *v)
+static int cf_check ppro_allocate_msr(struct vcpu *v)
 {
 	struct vpmu_struct *vpmu = vcpu_vpmu(v);
 	struct arch_msr_pair *msr_content;
@@ -245,7 +245,7 @@ static int ppro_allocate_msr(struct vcpu *v)
 	return 0;
 }
 
-static void ppro_free_msr(struct vcpu *v)
+static void cf_check ppro_free_msr(struct vcpu *v)
 {
 	struct vpmu_struct *vpmu = vcpu_vpmu(v);
 
@@ -255,7 +255,8 @@ static void ppro_free_msr(struct vcpu *v)
 	vpmu_reset(vpmu, VPMU_PASSIVE_DOMAIN_ALLOCATED);
 }
 
-static void ppro_load_msr(struct vcpu *v, int type, int index, u64 *msr_content)
+static void cf_check ppro_load_msr(
+	struct vcpu *v, int type, int index, u64 *msr_content)
 {
 	struct arch_msr_pair *msrs = vcpu_vpmu(v)->context;
 	switch ( type )
@@ -269,7 +270,8 @@ static void ppro_load_msr(struct vcpu *v, int type, int index, u64 *msr_content)
 	}
 }
 
-static void ppro_save_msr(struct vcpu *v, int type, int index, u64 msr_content)
+static void cf_check ppro_save_msr(
+	struct vcpu *v, int type, int index, u64 msr_content)
 {
 	struct arch_msr_pair *msrs = vcpu_vpmu(v)->context;
 
-- 
2.11.0




Thread overview: 123+ messages
2022-02-14 12:50 [PATCH v2 00/70] x86: Support for CET Indirect Branch Tracking Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 01/70] xen/domain: Improve pirq handling Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 02/70] xen/sort: Switch to an extern inline implementation Andrew Cooper
2022-02-14 13:13   ` Bertrand Marquis
2022-02-14 18:30     ` Andrew Cooper
2022-02-14 13:17   ` Julien Grall
2022-02-16  3:46     ` Stefano Stabellini
2022-02-16  9:29       ` Bertrand Marquis
2022-02-16 10:44       ` Andrew Cooper
2022-02-16 11:46         ` Julien Grall
2022-02-16 11:55           ` Bertrand Marquis
2022-02-14 12:50 ` [PATCH v2 03/70] xen/xsm: Move {do,compat}_flask_op() declarations into a header Andrew Cooper
2022-02-14 14:36   ` Daniel P. Smith
2022-02-14 12:50 ` [PATCH v2 04/70] x86/pv-shim: Don't modify the hypercall table Andrew Cooper
2022-02-14 13:33   ` Jan Beulich
2022-02-14 13:50     ` Andrew Cooper
2022-02-14 13:56       ` Jan Beulich
2022-02-16 22:17         ` Andrew Cooper
2022-02-17 10:20           ` Jan Beulich
2022-02-17 10:34             ` Juergen Gross
2022-02-21 19:21             ` Andrew Cooper
2022-02-22  8:41               ` Jan Beulich
2022-02-14 12:50 ` [PATCH v2 05/70] x86: Don't use the hypercall table for calling compat hypercalls Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 06/70] x86: Introduce support for CET-IBT Andrew Cooper
2022-02-15 14:01   ` Jan Beulich
2022-02-16 21:54     ` Andrew Cooper
2022-02-17 11:32       ` Jan Beulich
2022-02-14 12:50 ` [PATCH v2 07/70] x86: Build check for embedded endbr64 instructions Andrew Cooper
2022-02-15 15:12   ` Jan Beulich
2022-02-15 17:52     ` Andrew Cooper
2022-02-16  8:41       ` Jan Beulich
2022-02-16 11:55         ` Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 08/70] xen: CFI hardening for x86 hypercalls Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 09/70] xen: CFI hardening for custom_param() Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 10/70] xen: CFI hardening for __initcall() Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 11/70] xen: CFI hardening for notifier callbacks Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 12/70] xen: CFI hardening for acpi_table_parse() Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 13/70] xen: CFI hardening for continue_hypercall_on_cpu() Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 14/70] xen: CFI hardening for init_timer() Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 15/70] xen: CFI hardening for call_rcu() Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 16/70] xen: CFI hardening for IPIs Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 17/70] xen: CFI hardening for open_softirq() Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 18/70] xsm/flask/ss: CFI hardening Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 19/70] xsm: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 20/70] xen/sched: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 21/70] xen/evtchn: " Andrew Cooper
2022-02-14 16:53   ` David Vrabel
2022-02-14 16:59     ` Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 22/70] xen/hypfs: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 23/70] xen/tasklet: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 24/70] xen/keyhandler: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 25/70] xen/vpci: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 26/70] xen/decompress: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 27/70] xen/iommu: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 28/70] xen/video: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 29/70] xen/console: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 30/70] xen/misc: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 31/70] x86: CFI hardening for request_irq() Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 32/70] x86/hvm: CFI hardening for hvm_funcs Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 33/70] x86/hvm: CFI hardening for device emulation Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 34/70] x86/emul: CFI hardening Andrew Cooper
2022-02-14 13:38   ` Jan Beulich
2022-02-15 13:43     ` Andrew Cooper
2022-02-15 14:13       ` Jan Beulich
2022-02-16 21:34         ` Andrew Cooper
2022-02-17 11:49           ` Jan Beulich
2022-02-14 12:50 ` [PATCH v2 35/70] x86/ucode: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 36/70] x86/power: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 37/70] x86/apic: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 38/70] x86/nmi: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 39/70] x86/mtrr: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 40/70] x86/idle: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 41/70] x86/quirks: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 42/70] x86/hvmsave: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 43/70] x86/mce: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 44/70] x86/pmu: CFI hardening Andrew Cooper [this message]
2022-02-14 12:51 ` [PATCH v2 45/70] x86/cpu: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 46/70] x86/guest: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 47/70] x86/logdirty: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 48/70] x86/shadow: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 49/70] x86/hap: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 50/70] x86/p2m: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 51/70] x86/irq: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 52/70] x86/apei: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 53/70] x86/psr: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 54/70] x86/dpci: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 55/70] x86/pt: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 56/70] x86/time: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 57/70] x86/misc: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 58/70] x86/stack: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 59/70] x86/bugframe: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 60/70] x86: Use control flow typechecking where possible Andrew Cooper
2022-02-15 16:26   ` Jan Beulich
2022-02-14 12:51 ` [PATCH v2 61/70] x86/setup: Read CR4 earlier in __start_xen() Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 62/70] x86/alternatives: Clear CR4.CET when clearing CR0.WP Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 63/70] x86/traps: Rework write_stub_trampoline() to not hardcode the jmp Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 64/70] x86: Introduce helpers/checks for endbr64 instructions Andrew Cooper
2022-02-14 16:14   ` Andrew Cooper
2022-02-15 16:31   ` Jan Beulich
2022-02-14 12:51 ` [PATCH v2 65/70] x86/emul: Update emulation stubs to be CET-IBT compatible Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 66/70] x86/entry: Make syscall/sysenter entrypoints " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 67/70] x86/entry: Make IDT " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 68/70] x86/setup: Rework MSR_S_CET handling for CET-IBT Andrew Cooper
2022-02-15 16:46   ` Jan Beulich
2022-02-15 20:58     ` Andrew Cooper
2022-02-16  8:49       ` Jan Beulich
2022-02-14 12:51 ` [PATCH v2 69/70] x86/efi: Disable CET-IBT around Runtime Services calls Andrew Cooper
2022-02-15 16:53   ` Jan Beulich
2022-02-15 23:00     ` Andrew Cooper
2022-02-16  9:14       ` Jan Beulich
2022-02-14 12:51 ` [PATCH v2 70/70] x86: Enable CET Indirect Branch Tracking Andrew Cooper
2022-02-14 13:10 ` [PATCH v2 00/70] x86: Support for " Andrew Cooper
2022-02-14 13:43   ` Jan Beulich
2022-02-14 14:15     ` Andrew Cooper
2022-02-14 14:38       ` Jan Beulich
2022-02-16 21:59         ` Andrew Cooper
2022-02-17  9:56           ` Jan Beulich
2022-02-17 10:01 ` [PATCH v2.1 6.5/70] x86/kexec: Annotate embedded data with ELF metadata Andrew Cooper
2022-02-17 10:42   ` Jan Beulich
2022-02-17 12:06     ` Andrew Cooper
2022-02-17 14:48       ` Jan Beulich
2022-02-17 16:06         ` Andrew Cooper
2022-02-17 16:16           ` Jan Beulich
