From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Xen-devel <xen-devel@lists.xenproject.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: [PATCH v2 33/70] x86/hvm: CFI hardening for device emulation
Date: Mon, 14 Feb 2022 12:50:50 +0000
Message-ID: <20220214125127.17985-34-andrew.cooper3@citrix.com>
In-Reply-To: <20220214125127.17985-1-andrew.cooper3@citrix.com>

Control Flow Integrity schemes use toolchain and optionally hardware support
to help protect against call/jump/return-oriented programming attacks.

Use cf_check to annotate function pointer targets for the toolchain.
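
For reference, the annotation itself is introduced by the earlier "x86:
Introduce support for CET-IBT" patch in this series.  A minimal sketch of
the mechanism follows; the attribute spelling and the CONFIG_XEN_IBT guard
shown here are assumptions based on GCC's -fcf-protection=branch and
-mmanual-endbr support, not a quote of that patch:

    /* Sketch: expand to the compiler's cf_check attribute only when IBT
     * support is enabled, so annotated functions get an ENDBR64 at entry. */
    #ifdef CONFIG_XEN_IBT
    # define cf_check __attribute__((__cf_check__))
    #else
    # define cf_check
    #endif

    /* An annotated indirect-call target, as used throughout this patch: */
    static int cf_check handle_pit_io(
        int dir, unsigned int port, unsigned int bytes, uint32_t *val);

With CET-IBT active, an indirect call or jump must land on an ENDBR64
instruction.  Emitting ENDBR64 only for functions explicitly annotated as
function-pointer targets keeps the set of valid indirect-branch landing
pads as small as possible.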

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
---
 xen/arch/x86/emul-i8254.c                 |  8 +++----
 xen/arch/x86/hvm/emulate.c                | 21 ++++++++---------
 xen/arch/x86/hvm/hpet.c                   |  6 ++---
 xen/arch/x86/hvm/hvm.c                    |  2 +-
 xen/arch/x86/hvm/intercept.c              | 28 +++++++++++++----------
 xen/arch/x86/hvm/io.c                     | 38 +++++++++++++++++--------------
 xen/arch/x86/hvm/ioreq.c                  |  2 +-
 xen/arch/x86/hvm/pmtimer.c                |  4 ++--
 xen/arch/x86/hvm/rtc.c                    |  6 ++---
 xen/arch/x86/hvm/stdvga.c                 | 19 ++++++++--------
 xen/arch/x86/hvm/svm/svm.c                |  4 ++--
 xen/arch/x86/hvm/vioapic.c                |  8 +++----
 xen/arch/x86/hvm/vlapic.c                 | 11 +++++----
 xen/arch/x86/hvm/vmsi.c                   | 14 +++++++-----
 xen/arch/x86/hvm/vpic.c                   |  4 ++--
 xen/arch/x86/include/asm/hvm/vioapic.h    |  2 +-
 xen/drivers/passthrough/amd/iommu_guest.c | 10 ++++----
 17 files changed, 98 insertions(+), 89 deletions(-)

diff --git a/xen/arch/x86/emul-i8254.c b/xen/arch/x86/emul-i8254.c
index 050c784702af..0e09a173187f 100644
--- a/xen/arch/x86/emul-i8254.c
+++ b/xen/arch/x86/emul-i8254.c
@@ -48,9 +48,9 @@
 #define RW_STATE_WORD0 3
 #define RW_STATE_WORD1 4
 
-static int handle_pit_io(
+static int cf_check handle_pit_io(
     int dir, unsigned int port, unsigned int bytes, uint32_t *val);
-static int handle_speaker_io(
+static int cf_check handle_speaker_io(
     int dir, unsigned int port, unsigned int bytes, uint32_t *val);
 
 #define get_guest_time(v) \
@@ -505,7 +505,7 @@ void pit_deinit(struct domain *d)
 }
 
 /* the intercept action for PIT DM retval:0--not handled; 1--handled */  
-static int handle_pit_io(
+static int cf_check handle_pit_io(
     int dir, unsigned int port, unsigned int bytes, uint32_t *val)
 {
     struct PITState *vpit = vcpu_vpit(current);
@@ -548,7 +548,7 @@ static uint32_t speaker_ioport_read(
             (pit_get_out(pit, 2) << 5) | (refresh_clock << 4));
 }
 
-static int handle_speaker_io(
+static int cf_check handle_speaker_io(
     int dir, unsigned int port, uint32_t bytes, uint32_t *val)
 {
     struct PITState *vpit = vcpu_vpit(current);
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 2b3fb4d6ba05..39dac7fd9d6d 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -71,19 +71,17 @@ static void hvmtrace_io_assist(const ioreq_t *p)
     trace_var(event, 0/*!cycles*/, size, buffer);
 }
 
-static int null_read(const struct hvm_io_handler *io_handler,
-                     uint64_t addr,
-                     uint32_t size,
-                     uint64_t *data)
+static int cf_check null_read(
+    const struct hvm_io_handler *io_handler, uint64_t addr, uint32_t size,
+    uint64_t *data)
 {
     *data = ~0ul;
     return X86EMUL_OKAY;
 }
 
-static int null_write(const struct hvm_io_handler *handler,
-                      uint64_t addr,
-                      uint32_t size,
-                      uint64_t data)
+static int cf_check null_write(
+    const struct hvm_io_handler *handler, uint64_t addr, uint32_t size,
+    uint64_t data)
 {
     return X86EMUL_OKAY;
 }
@@ -114,10 +112,9 @@ static const struct hvm_io_handler null_handler = {
     .ops = &null_ops
 };
 
-static int ioreq_server_read(const struct hvm_io_handler *io_handler,
-                    uint64_t addr,
-                    uint32_t size,
-                    uint64_t *data)
+static int cf_check ioreq_server_read(
+    const struct hvm_io_handler *io_handler, uint64_t addr, uint32_t size,
+    uint64_t *data)
 {
     if ( hvm_copy_from_guest_phys(data, addr, size) != HVMTRANS_okay )
         return X86EMUL_UNHANDLEABLE;
diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
index 8267f0b8a278..7bdb51cfa1c4 100644
--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -162,7 +162,7 @@ static inline int hpet_check_access_length(
     return 0;
 }
 
-static int hpet_read(
+static int cf_check hpet_read(
     struct vcpu *v, unsigned long addr, unsigned int length,
     unsigned long *pval)
 {
@@ -351,7 +351,7 @@ static void timer_sanitize_int_route(HPETState *h, unsigned int tn)
                   HPET_TN_ROUTE);
 }
 
-static int hpet_write(
+static int cf_check hpet_write(
     struct vcpu *v, unsigned long addr,
     unsigned int length, unsigned long val)
 {
@@ -569,7 +569,7 @@ static int hpet_write(
     return X86EMUL_OKAY;
 }
 
-static int hpet_range(struct vcpu *v, unsigned long addr)
+static int cf_check hpet_range(struct vcpu *v, unsigned long addr)
 {
     return ( (addr >= HPET_BASE_ADDRESS) &&
              (addr < (HPET_BASE_ADDRESS + HPET_MMAP_SIZE)) );
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 5ec10f30803e..9e4924649077 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -543,7 +543,7 @@ void hvm_do_resume(struct vcpu *v)
     }
 }
 
-static int hvm_print_line(
+static int cf_check hvm_print_line(
     int dir, unsigned int port, unsigned int bytes, uint32_t *val)
 {
     struct domain *cd = current->domain;
diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index 02ca3b05b05a..ffa31b746716 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -32,8 +32,8 @@
 #include <xen/event.h>
 #include <xen/iommu.h>
 
-static bool_t hvm_mmio_accept(const struct hvm_io_handler *handler,
-                              const ioreq_t *p)
+static bool cf_check hvm_mmio_accept(
+    const struct hvm_io_handler *handler, const ioreq_t *p)
 {
     paddr_t first = ioreq_mmio_first_byte(p), last;
 
@@ -51,16 +51,18 @@ static bool_t hvm_mmio_accept(const struct hvm_io_handler *handler,
     return 1;
 }
 
-static int hvm_mmio_read(const struct hvm_io_handler *handler,
-                         uint64_t addr, uint32_t size, uint64_t *data)
+static int cf_check hvm_mmio_read(
+    const struct hvm_io_handler *handler, uint64_t addr, uint32_t size,
+    uint64_t *data)
 {
     BUG_ON(handler->type != IOREQ_TYPE_COPY);
 
     return handler->mmio.ops->read(current, addr, size, data);
 }
 
-static int hvm_mmio_write(const struct hvm_io_handler *handler,
-                          uint64_t addr, uint32_t size, uint64_t data)
+static int cf_check hvm_mmio_write(
+    const struct hvm_io_handler *handler, uint64_t addr, uint32_t size,
+    uint64_t data)
 {
     BUG_ON(handler->type != IOREQ_TYPE_COPY);
 
@@ -73,8 +75,8 @@ static const struct hvm_io_ops mmio_ops = {
     .write = hvm_mmio_write
 };
 
-static bool_t hvm_portio_accept(const struct hvm_io_handler *handler,
-                                const ioreq_t *p)
+static bool cf_check hvm_portio_accept(
+    const struct hvm_io_handler *handler, const ioreq_t *p)
 {
     unsigned int start = handler->portio.port;
     unsigned int end = start + handler->portio.size;
@@ -84,8 +86,9 @@ static bool_t hvm_portio_accept(const struct hvm_io_handler *handler,
     return (p->addr >= start) && ((p->addr + p->size) <= end);
 }
 
-static int hvm_portio_read(const struct hvm_io_handler *handler,
-                           uint64_t addr, uint32_t size, uint64_t *data)
+static int cf_check hvm_portio_read(
+    const struct hvm_io_handler *handler, uint64_t addr, uint32_t size,
+    uint64_t *data)
 {
     uint32_t val = ~0u;
     int rc;
@@ -98,8 +101,9 @@ static int hvm_portio_read(const struct hvm_io_handler *handler,
     return rc;
 }
 
-static int hvm_portio_write(const struct hvm_io_handler *handler,
-                            uint64_t addr, uint32_t size, uint64_t data)
+static int cf_check hvm_portio_write(
+    const struct hvm_io_handler *handler, uint64_t addr, uint32_t size,
+    uint64_t data)
 {
     uint32_t val = data;
 
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 93f1d1503fa6..f70bfde90143 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -156,8 +156,8 @@ bool handle_pio(uint16_t port, unsigned int size, int dir)
     return true;
 }
 
-static bool_t g2m_portio_accept(const struct hvm_io_handler *handler,
-                                const ioreq_t *p)
+static bool cf_check g2m_portio_accept(
+    const struct hvm_io_handler *handler, const ioreq_t *p)
 {
     struct vcpu *curr = current;
     const struct hvm_domain *hvm = &curr->domain->arch.hvm;
@@ -179,8 +179,9 @@ static bool_t g2m_portio_accept(const struct hvm_io_handler *handler,
     return 0;
 }
 
-static int g2m_portio_read(const struct hvm_io_handler *handler,
-                           uint64_t addr, uint32_t size, uint64_t *data)
+static int cf_check g2m_portio_read(
+    const struct hvm_io_handler *handler, uint64_t addr, uint32_t size,
+    uint64_t *data)
 {
     struct hvm_vcpu_io *hvio = &current->arch.hvm.hvm_io;
     const struct g2m_ioport *g2m_ioport = hvio->g2m_ioport;
@@ -204,8 +205,9 @@ static int g2m_portio_read(const struct hvm_io_handler *handler,
     return X86EMUL_OKAY;
 }
 
-static int g2m_portio_write(const struct hvm_io_handler *handler,
-                            uint64_t addr, uint32_t size, uint64_t data)
+static int cf_check g2m_portio_write(
+    const struct hvm_io_handler *handler, uint64_t addr, uint32_t size,
+    uint64_t data)
 {
     struct hvm_vcpu_io *hvio = &current->arch.hvm.hvm_io;
     const struct g2m_ioport *g2m_ioport = hvio->g2m_ioport;
@@ -261,14 +263,15 @@ unsigned int hvm_pci_decode_addr(unsigned int cf8, unsigned int addr,
 }
 
 /* vPCI config space IO ports handlers (0xcf8/0xcfc). */
-static bool vpci_portio_accept(const struct hvm_io_handler *handler,
-                               const ioreq_t *p)
+static bool cf_check vpci_portio_accept(
+    const struct hvm_io_handler *handler, const ioreq_t *p)
 {
     return (p->addr == 0xcf8 && p->size == 4) || (p->addr & ~3) == 0xcfc;
 }
 
-static int vpci_portio_read(const struct hvm_io_handler *handler,
-                            uint64_t addr, uint32_t size, uint64_t *data)
+static int cf_check vpci_portio_read(
+    const struct hvm_io_handler *handler, uint64_t addr, uint32_t size,
+    uint64_t *data)
 {
     const struct domain *d = current->domain;
     unsigned int reg;
@@ -299,8 +302,9 @@ static int vpci_portio_read(const struct hvm_io_handler *handler,
     return X86EMUL_OKAY;
 }
 
-static int vpci_portio_write(const struct hvm_io_handler *handler,
-                             uint64_t addr, uint32_t size, uint64_t data)
+static int cf_check vpci_portio_write(
+    const struct hvm_io_handler *handler, uint64_t addr, uint32_t size,
+    uint64_t data)
 {
     struct domain *d = current->domain;
     unsigned int reg;
@@ -387,7 +391,7 @@ static unsigned int vpci_mmcfg_decode_addr(const struct hvm_mmcfg *mmcfg,
     return addr & (PCI_CFG_SPACE_EXP_SIZE - 1);
 }
 
-static int vpci_mmcfg_accept(struct vcpu *v, unsigned long addr)
+static int cf_check vpci_mmcfg_accept(struct vcpu *v, unsigned long addr)
 {
     struct domain *d = v->domain;
     bool found;
@@ -399,8 +403,8 @@ static int vpci_mmcfg_accept(struct vcpu *v, unsigned long addr)
     return found;
 }
 
-static int vpci_mmcfg_read(struct vcpu *v, unsigned long addr,
-                           unsigned int len, unsigned long *data)
+static int cf_check vpci_mmcfg_read(
+    struct vcpu *v, unsigned long addr, unsigned int len, unsigned long *data)
 {
     struct domain *d = v->domain;
     const struct hvm_mmcfg *mmcfg;
@@ -426,8 +430,8 @@ static int vpci_mmcfg_read(struct vcpu *v, unsigned long addr,
     return X86EMUL_OKAY;
 }
 
-static int vpci_mmcfg_write(struct vcpu *v, unsigned long addr,
-                            unsigned int len, unsigned long data)
+static int cf_check vpci_mmcfg_write(
+    struct vcpu *v, unsigned long addr, unsigned int len, unsigned long data)
 {
     struct domain *d = v->domain;
     const struct hvm_mmcfg *mmcfg;
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 02ad9db56523..8409d910d689 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -319,7 +319,7 @@ bool arch_ioreq_server_get_type_addr(const struct domain *d,
     return true;
 }
 
-static int hvm_access_cf8(
+static int cf_check hvm_access_cf8(
     int dir, unsigned int port, unsigned int bytes, uint32_t *val)
 {
     struct domain *d = current->domain;
diff --git a/xen/arch/x86/hvm/pmtimer.c b/xen/arch/x86/hvm/pmtimer.c
index 808819d1de91..60e3c8de4c6b 100644
--- a/xen/arch/x86/hvm/pmtimer.c
+++ b/xen/arch/x86/hvm/pmtimer.c
@@ -152,7 +152,7 @@ static void cf_check pmt_timer_callback(void *opaque)
 }
 
 /* Handle port I/O to the PM1a_STS and PM1a_EN registers */
-static int handle_evt_io(
+static int cf_check handle_evt_io(
     int dir, unsigned int port, unsigned int bytes, uint32_t *val)
 {
     struct vcpu *v = current;
@@ -216,7 +216,7 @@ static int handle_evt_io(
 
 
 /* Handle port I/O to the TMR_VAL register */
-static int handle_pmt_io(
+static int cf_check handle_pmt_io(
     int dir, unsigned int port, unsigned int bytes, uint32_t *val)
 {
     struct vcpu *v = current;
diff --git a/xen/arch/x86/hvm/rtc.c b/xen/arch/x86/hvm/rtc.c
index 09d3501276bc..bdc647e433e9 100644
--- a/xen/arch/x86/hvm/rtc.c
+++ b/xen/arch/x86/hvm/rtc.c
@@ -696,7 +696,7 @@ static uint32_t rtc_ioport_read(RTCState *s, uint32_t addr)
     return ret;
 }
 
-static int handle_rtc_io(
+static int cf_check handle_rtc_io(
     int dir, unsigned int port, unsigned int bytes, uint32_t *val)
 {
     struct RTCState *vrtc = vcpu_vrtc(current);
@@ -809,8 +809,8 @@ void rtc_reset(struct domain *d)
 }
 
 /* RTC mediator for HVM hardware domain. */
-static int hw_rtc_io(int dir, unsigned int port, unsigned int size,
-                     uint32_t *val)
+static int cf_check hw_rtc_io(
+    int dir, unsigned int port, unsigned int size, uint32_t *val)
 {
     if ( dir == IOREQ_READ )
         *val = ~0;
diff --git a/xen/arch/x86/hvm/stdvga.c b/xen/arch/x86/hvm/stdvga.c
index ab9781d82a55..be8200c8d072 100644
--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -199,7 +199,7 @@ static void stdvga_out(uint32_t port, uint32_t bytes, uint32_t val)
     }
 }
 
-static int stdvga_intercept_pio(
+static int cf_check stdvga_intercept_pio(
     int dir, unsigned int port, unsigned int bytes, uint32_t *val)
 {
     struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
@@ -302,8 +302,9 @@ static uint8_t stdvga_mem_readb(uint64_t addr)
     return ret;
 }
 
-static int stdvga_mem_read(const struct hvm_io_handler *handler,
-                           uint64_t addr, uint32_t size, uint64_t *p_data)
+static int cf_check stdvga_mem_read(
+    const struct hvm_io_handler *handler, uint64_t addr, uint32_t size,
+    uint64_t *p_data)
 {
     uint64_t data = ~0ul;
 
@@ -453,9 +454,9 @@ static void stdvga_mem_writeb(uint64_t addr, uint32_t val)
     }
 }
 
-static int stdvga_mem_write(const struct hvm_io_handler *handler,
-                            uint64_t addr, uint32_t size,
-                            uint64_t data)
+static int cf_check stdvga_mem_write(
+    const struct hvm_io_handler *handler, uint64_t addr, uint32_t size,
+    uint64_t data)
 {
     struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
     ioreq_t p = {
@@ -514,8 +515,8 @@ static int stdvga_mem_write(const struct hvm_io_handler *handler,
     return ioreq_send(srv, &p, 1);
 }
 
-static bool_t stdvga_mem_accept(const struct hvm_io_handler *handler,
-                                const ioreq_t *p)
+static bool cf_check stdvga_mem_accept(
+    const struct hvm_io_handler *handler, const ioreq_t *p)
 {
     struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
 
@@ -558,7 +559,7 @@ static bool_t stdvga_mem_accept(const struct hvm_io_handler *handler,
     return 0;
 }
 
-static void stdvga_mem_complete(const struct hvm_io_handler *handler)
+static void cf_check stdvga_mem_complete(const struct hvm_io_handler *handler)
 {
     struct hvm_hw_stdvga *s = &current->domain->arch.hvm.stdvga;
 
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index de6166241bf1..4c4ebda5e6e4 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1133,8 +1133,8 @@ static void svm_host_osvw_init(void)
     spin_unlock(&osvw_lock);
 }
 
-static int acpi_c1e_quirk(int dir, unsigned int port, unsigned int bytes,
-                          uint32_t *val)
+static int cf_check acpi_c1e_quirk(
+    int dir, unsigned int port, unsigned int bytes, uint32_t *val)
 {
     ASSERT(bytes == 1 && port == acpi_smi_cmd);
 
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 553c0f76eff8..b56549aa22d1 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -135,7 +135,7 @@ static uint32_t vioapic_read_indirect(const struct hvm_vioapic *vioapic)
     return result;
 }
 
-static int vioapic_read(
+static int cf_check vioapic_read(
     struct vcpu *v, unsigned long addr,
     unsigned int length, unsigned long *pval)
 {
@@ -351,7 +351,7 @@ static void vioapic_write_indirect(
     }
 }
 
-static int vioapic_write(
+static int cf_check vioapic_write(
     struct vcpu *v, unsigned long addr,
     unsigned int length, unsigned long val)
 {
@@ -383,7 +383,7 @@ static int vioapic_write(
     return X86EMUL_OKAY;
 }
 
-static int vioapic_range(struct vcpu *v, unsigned long addr)
+static int cf_check vioapic_range(struct vcpu *v, unsigned long addr)
 {
     return !!addr_vioapic(v->domain, addr);
 }
@@ -568,7 +568,7 @@ int vioapic_get_mask(const struct domain *d, unsigned int gsi)
     return vioapic->redirtbl[pin].fields.mask;
 }
 
-int vioapic_get_vector(const struct domain *d, unsigned int gsi)
+int cf_check vioapic_get_vector(const struct domain *d, unsigned int gsi)
 {
     unsigned int pin = 0; /* See gsi_vioapic */
     const struct hvm_vioapic *vioapic = gsi_vioapic(d, gsi, &pin);
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index fe375912bef1..652e3cb87f12 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -615,8 +615,9 @@ static uint32_t vlapic_read_aligned(const struct vlapic *vlapic,
     return 0;
 }
 
-static int vlapic_mmio_read(struct vcpu *v, unsigned long address,
-                            unsigned int len, unsigned long *pval)
+static int cf_check vlapic_mmio_read(
+    struct vcpu *v, unsigned long address, unsigned int len,
+    unsigned long *pval)
 {
     struct vlapic *vlapic = vcpu_vlapic(v);
     unsigned int offset = address - vlapic_base_address(vlapic);
@@ -898,8 +899,8 @@ void vlapic_reg_write(struct vcpu *v, unsigned int reg, uint32_t val)
     }
 }
 
-static int vlapic_mmio_write(struct vcpu *v, unsigned long address,
-                             unsigned int len, unsigned long val)
+static int cf_check vlapic_mmio_write(
+    struct vcpu *v, unsigned long address, unsigned int len, unsigned long val)
 {
     struct vlapic *vlapic = vcpu_vlapic(v);
     unsigned int offset = address - vlapic_base_address(vlapic);
@@ -1052,7 +1053,7 @@ int guest_wrmsr_x2apic(struct vcpu *v, uint32_t msr, uint64_t msr_content)
     return X86EMUL_OKAY;
 }
 
-static int vlapic_range(struct vcpu *v, unsigned long addr)
+static int cf_check vlapic_range(struct vcpu *v, unsigned long addr)
 {
     struct vlapic *vlapic = vcpu_vlapic(v);
     unsigned long offset  = addr - vlapic_base_address(vlapic);
diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
index 2889575a2035..d4a8c953e23f 100644
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -211,8 +211,9 @@ static struct msi_desc *msixtbl_addr_to_desc(
     return NULL;
 }
 
-static int msixtbl_read(const struct hvm_io_handler *handler,
-                        uint64_t address, uint32_t len, uint64_t *pval)
+static int cf_check msixtbl_read(
+    const struct hvm_io_handler *handler, uint64_t address, uint32_t len,
+    uint64_t *pval)
 {
     unsigned long offset;
     struct msixtbl_entry *entry;
@@ -350,14 +351,15 @@ static int msixtbl_write(struct vcpu *v, unsigned long address,
     return r;
 }
 
-static int _msixtbl_write(const struct hvm_io_handler *handler,
-                          uint64_t address, uint32_t len, uint64_t val)
+static int cf_check _msixtbl_write(
+    const struct hvm_io_handler *handler, uint64_t address, uint32_t len,
+    uint64_t val)
 {
     return msixtbl_write(current, address, len, val);
 }
 
-static bool_t msixtbl_range(const struct hvm_io_handler *handler,
-                            const ioreq_t *r)
+static bool cf_check msixtbl_range(
+    const struct hvm_io_handler *handler, const ioreq_t *r)
 {
     struct vcpu *curr = current;
     unsigned long addr = r->addr;
diff --git a/xen/arch/x86/hvm/vpic.c b/xen/arch/x86/hvm/vpic.c
index 91c2c6983393..5d8ef259b710 100644
--- a/xen/arch/x86/hvm/vpic.c
+++ b/xen/arch/x86/hvm/vpic.c
@@ -351,7 +351,7 @@ static uint32_t vpic_ioport_read(struct hvm_hw_vpic *vpic, uint32_t addr)
     return vpic->imr;
 }
 
-static int vpic_intercept_pic_io(
+static int cf_check vpic_intercept_pic_io(
     int dir, unsigned int port, unsigned int bytes, uint32_t *val)
 {
     struct hvm_hw_vpic *vpic;
@@ -373,7 +373,7 @@ static int vpic_intercept_pic_io(
     return X86EMUL_OKAY;
 }
 
-static int vpic_intercept_elcr_io(
+static int cf_check vpic_intercept_elcr_io(
     int dir, unsigned int port, unsigned int bytes, uint32_t *val)
 {
     struct hvm_hw_vpic *vpic;
diff --git a/xen/arch/x86/include/asm/hvm/vioapic.h b/xen/arch/x86/include/asm/hvm/vioapic.h
index 36b64d20d60c..2944ec20dd53 100644
--- a/xen/arch/x86/include/asm/hvm/vioapic.h
+++ b/xen/arch/x86/include/asm/hvm/vioapic.h
@@ -66,7 +66,7 @@ void vioapic_irq_positive_edge(struct domain *d, unsigned int irq);
 void vioapic_update_EOI(struct domain *d, u8 vector);
 
 int vioapic_get_mask(const struct domain *d, unsigned int gsi);
-int vioapic_get_vector(const struct domain *d, unsigned int gsi);
+int cf_check vioapic_get_vector(const struct domain *d, unsigned int gsi);
 int vioapic_get_trigger_mode(const struct domain *d, unsigned int gsi);
 
 #endif /* __ASM_X86_HVM_VIOAPIC_H__ */
diff --git a/xen/drivers/passthrough/amd/iommu_guest.c b/xen/drivers/passthrough/amd/iommu_guest.c
index 361ff864d846..80a331f546ed 100644
--- a/xen/drivers/passthrough/amd/iommu_guest.c
+++ b/xen/drivers/passthrough/amd/iommu_guest.c
@@ -645,8 +645,8 @@ static uint64_t iommu_mmio_read64(struct guest_iommu *iommu,
     return val;
 }
 
-static int guest_iommu_mmio_read(struct vcpu *v, unsigned long addr,
-                                 unsigned int len, unsigned long *pval)
+static int cf_check guest_iommu_mmio_read(
+    struct vcpu *v, unsigned long addr, unsigned int len, unsigned long *pval)
 {
     struct guest_iommu *iommu = vcpu_iommu(v);
     unsigned long offset;
@@ -735,8 +735,8 @@ static void guest_iommu_mmio_write64(struct guest_iommu *iommu,
     }
 }
 
-static int guest_iommu_mmio_write(struct vcpu *v, unsigned long addr,
-                                  unsigned int len, unsigned long val)
+static int cf_check guest_iommu_mmio_write(
+    struct vcpu *v, unsigned long addr, unsigned int len, unsigned long val)
 {
     struct guest_iommu *iommu = vcpu_iommu(v);
     unsigned long offset;
@@ -819,7 +819,7 @@ static void guest_iommu_reg_init(struct guest_iommu *iommu)
     iommu->reg_ext_feature = ef;
 }
 
-static int guest_iommu_mmio_range(struct vcpu *v, unsigned long addr)
+static int cf_check guest_iommu_mmio_range(struct vcpu *v, unsigned long addr)
 {
     struct guest_iommu *iommu = vcpu_iommu(v);
 
-- 
2.11.0



Thread overview: 123+ messages
2022-02-14 12:50 [PATCH v2 00/70] x86: Support for CET Indirect Branch Tracking Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 01/70] xen/domain: Improve pirq handling Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 02/70] xen/sort: Switch to an extern inline implementation Andrew Cooper
2022-02-14 13:13   ` Bertrand Marquis
2022-02-14 18:30     ` Andrew Cooper
2022-02-14 13:17   ` Julien Grall
2022-02-16  3:46     ` Stefano Stabellini
2022-02-16  9:29       ` Bertrand Marquis
2022-02-16 10:44       ` Andrew Cooper
2022-02-16 11:46         ` Julien Grall
2022-02-16 11:55           ` Bertrand Marquis
2022-02-14 12:50 ` [PATCH v2 03/70] xen/xsm: Move {do,compat}_flask_op() declarations into a header Andrew Cooper
2022-02-14 14:36   ` Daniel P. Smith
2022-02-14 12:50 ` [PATCH v2 04/70] x86/pv-shim: Don't modify the hypercall table Andrew Cooper
2022-02-14 13:33   ` Jan Beulich
2022-02-14 13:50     ` Andrew Cooper
2022-02-14 13:56       ` Jan Beulich
2022-02-16 22:17         ` Andrew Cooper
2022-02-17 10:20           ` Jan Beulich
2022-02-17 10:34             ` Juergen Gross
2022-02-21 19:21             ` Andrew Cooper
2022-02-22  8:41               ` Jan Beulich
2022-02-14 12:50 ` [PATCH v2 05/70] x86: Don't use the hypercall table for calling compat hypercalls Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 06/70] x86: Introduce support for CET-IBT Andrew Cooper
2022-02-15 14:01   ` Jan Beulich
2022-02-16 21:54     ` Andrew Cooper
2022-02-17 11:32       ` Jan Beulich
2022-02-14 12:50 ` [PATCH v2 07/70] x86: Build check for embedded endbr64 instructions Andrew Cooper
2022-02-15 15:12   ` Jan Beulich
2022-02-15 17:52     ` Andrew Cooper
2022-02-16  8:41       ` Jan Beulich
2022-02-16 11:55         ` Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 08/70] xen: CFI hardening for x86 hypercalls Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 09/70] xen: CFI hardening for custom_param() Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 10/70] xen: CFI hardening for __initcall() Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 11/70] xen: CFI hardening for notifier callbacks Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 12/70] xen: CFI hardening for acpi_table_parse() Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 13/70] xen: CFI hardening for continue_hypercall_on_cpu() Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 14/70] xen: CFI hardening for init_timer() Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 15/70] xen: CFI hardening for call_rcu() Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 16/70] xen: CFI hardening for IPIs Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 17/70] xen: CFI hardening for open_softirq() Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 18/70] xsm/flask/ss: CFI hardening Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 19/70] xsm: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 20/70] xen/sched: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 21/70] xen/evtchn: " Andrew Cooper
2022-02-14 16:53   ` David Vrabel
2022-02-14 16:59     ` Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 22/70] xen/hypfs: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 23/70] xen/tasklet: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 24/70] xen/keyhandler: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 25/70] xen/vpci: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 26/70] xen/decompress: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 27/70] xen/iommu: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 28/70] xen/video: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 29/70] xen/console: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 30/70] xen/misc: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 31/70] x86: CFI hardening for request_irq() Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 32/70] x86/hvm: CFI hardening for hvm_funcs Andrew Cooper
2022-02-14 12:50 ` Andrew Cooper [this message]
2022-02-14 12:50 ` [PATCH v2 34/70] x86/emul: CFI hardening Andrew Cooper
2022-02-14 13:38   ` Jan Beulich
2022-02-15 13:43     ` Andrew Cooper
2022-02-15 14:13       ` Jan Beulich
2022-02-16 21:34         ` Andrew Cooper
2022-02-17 11:49           ` Jan Beulich
2022-02-14 12:50 ` [PATCH v2 35/70] x86/ucode: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 36/70] x86/power: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 37/70] x86/apic: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 38/70] x86/nmi: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 39/70] x86/mtrr: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 40/70] x86/idle: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 41/70] x86/quirks: " Andrew Cooper
2022-02-14 12:50 ` [PATCH v2 42/70] x86/hvmsave: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 43/70] x86/mce: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 44/70] x86/pmu: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 45/70] x86/cpu: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 46/70] x86/guest: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 47/70] x86/logdirty: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 48/70] x86/shadow: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 49/70] x86/hap: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 50/70] x86/p2m: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 51/70] x86/irq: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 52/70] x86/apei: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 53/70] x86/psr: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 54/70] x86/dpci: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 55/70] x86/pt: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 56/70] x86/time: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 57/70] x86/misc: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 58/70] x86/stack: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 59/70] x86/bugframe: " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 60/70] x86: Use control flow typechecking where possible Andrew Cooper
2022-02-15 16:26   ` Jan Beulich
2022-02-14 12:51 ` [PATCH v2 61/70] x86/setup: Read CR4 earlier in __start_xen() Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 62/70] x86/alternatives: Clear CR4.CET when clearing CR0.WP Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 63/70] x86/traps: Rework write_stub_trampoline() to not hardcode the jmp Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 64/70] x86: Introduce helpers/checks for endbr64 instructions Andrew Cooper
2022-02-14 16:14   ` Andrew Cooper
2022-02-15 16:31   ` Jan Beulich
2022-02-14 12:51 ` [PATCH v2 65/70] x86/emul: Update emulation stubs to be CET-IBT compatible Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 66/70] x86/entry: Make syscall/sysenter entrypoints " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 67/70] x86/entry: Make IDT " Andrew Cooper
2022-02-14 12:51 ` [PATCH v2 68/70] x86/setup: Rework MSR_S_CET handling for CET-IBT Andrew Cooper
2022-02-15 16:46   ` Jan Beulich
2022-02-15 20:58     ` Andrew Cooper
2022-02-16  8:49       ` Jan Beulich
2022-02-14 12:51 ` [PATCH v2 69/70] x86/efi: Disable CET-IBT around Runtime Services calls Andrew Cooper
2022-02-15 16:53   ` Jan Beulich
2022-02-15 23:00     ` Andrew Cooper
2022-02-16  9:14       ` Jan Beulich
2022-02-14 12:51 ` [PATCH v2 70/70] x86: Enable CET Indirect Branch Tracking Andrew Cooper
2022-02-14 13:10 ` [PATCH v2 00/70] x86: Support for " Andrew Cooper
2022-02-14 13:43   ` Jan Beulich
2022-02-14 14:15     ` Andrew Cooper
2022-02-14 14:38       ` Jan Beulich
2022-02-16 21:59         ` Andrew Cooper
2022-02-17  9:56           ` Jan Beulich
2022-02-17 10:01 ` [PATCH v2.1 6.5/70] x86/kexec: Annotate embedded data with ELF metadata Andrew Cooper
2022-02-17 10:42   ` Jan Beulich
2022-02-17 12:06     ` Andrew Cooper
2022-02-17 14:48       ` Jan Beulich
2022-02-17 16:06         ` Andrew Cooper
2022-02-17 16:16           ` Jan Beulich
