From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Xen-devel <xen-devel@lists.xen.org>
Cc: "Wei Liu" <wei.liu2@citrix.com>,
	"Jan Beulich" <JBeulich@suse.com>,
	"Andrew Cooper" <andrew.cooper3@citrix.com>,
	"Suravee Suthikulpanit" <suravee.suthikulpanit@amd.com>,
	"Brian Woods" <brian.woods@amd.com>,
	"Roger Pau Monné" <roger.pau@citrix.com>
Subject: [PATCH 03/14] AMD/IOMMU: Fix multiple reference counting errors
Date: Wed, 21 Nov 2018 13:21:11 +0000
Message-ID: <1542806482-24030-4-git-send-email-andrew.cooper3@citrix.com>
In-Reply-To: <1542806482-24030-1-git-send-email-andrew.cooper3@citrix.com>

Most of these issues would be XSAs if these paths were accessible to guests.

First, override the {get,put}_gfn() helpers to use gfn_t, which was the
original purpose of this patch.
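
As a minimal sketch (assuming the existing typesafe gfn_t/mfn_t wrappers
used elsewhere in Xen), call sites then look like:

    gfn_t gfn = _gfn(PFN_DOWN(base_raw));
    p2m_type_t p2mt;
    mfn_t mfn = get_gfn(d, gfn, &p2mt);  /* takes a gfn_t, returns an mfn_t */
    /* ... use mfn while the reference is held ... */
    put_gfn(d, gfn);                     /* balanced against the same gfn_t */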

guest_iommu_get_table_mfn() has two bugs.  First, it gets a ref on one gfn,
and puts a ref for a different gfn.  This is only a latent bug for now, as we
don't do per-gfn locking yet.  Next, the mfn return value is unsafe to use
after put_gfn() is called, as the guest could have freed the page in the
meantime.
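
Schematically, the removed helper's pattern (see its deletion below) is:

    mfn = mfn_x(get_gfn(d, gfn + idx, &p2mt)); /* ref taken on gfn + idx ...   */
    put_gfn(d, gfn);                           /* ... but dropped on plain gfn */
    return mfn;                                /* and mfn outlives the ref     */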

In addition, get_gfn_from_base_reg() erroneously asserts that base_raw can't
be 0, when it legitimately can be.  On top of that, the return value from
guest_iommu_get_table_mfn() is passed into map_domain_page() before checking
that it is a real mfn.

Most of the complexity here comes from inlining guest_iommu_get_table_mfn()
into its callers and holding the gfn reference until the operation is complete.
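
Each call site now follows roughly this shape (using the helpers already
present in this file):

    mfn = get_gfn(d, gfn, &p2mt);             /* ref held from here ...      */
    if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_ram(p2mt) )
    {
        guest_iommu_disable(iommu);
        goto out;                             /* error paths drop it too     */
    }

    ptr = map_domain_page(mfn);
    /* ... read/write the guest's table ... */
    unmap_domain_page(ptr);

 out:
    put_gfn(d, gfn);                          /* ... until the work is done  */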

Furthermore, guest_iommu_process_command() is altered to take a local copy of
each cmd_entry_t, rather than passing a pointer to guest-controlled memory into
each of the handling functions.  It is also modified to break on error rather
than continue.  These changes are in line with the spec, which states that the
IOMMU reads a command entry exactly once, and ceases processing when an error
is encountered.
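
In outline, with handle_cmd() standing in for the opcode switch in the
hunk below, the loop body becomes:

    cmd_entry_t cmd, *ptr;

    ptr = map_domain_page(mfn);
    memcpy(&cmd, ptr + head % (PAGE_SIZE / sizeof(*ptr)), sizeof(cmd));
    unmap_domain_page(ptr);              /* guest memory is read exactly once */
    put_gfn(d, gfn);

    ret = handle_cmd(d, &cmd);           /* handlers only see the local copy  */
    if ( ret )
    {
        guest_iommu_disable(iommu);
        break;                           /* stop processing on error          */
    }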

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Wei Liu <wei.liu2@citrix.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
CC: Brian Woods <brian.woods@amd.com>

This patch by no means indicates that the code is ready for production use.
---
 xen/drivers/passthrough/amd/iommu_guest.c | 224 +++++++++++++++++++-----------
 1 file changed, 146 insertions(+), 78 deletions(-)

diff --git a/xen/drivers/passthrough/amd/iommu_guest.c b/xen/drivers/passthrough/amd/iommu_guest.c
index 96175bb..03ca0cf 100644
--- a/xen/drivers/passthrough/amd/iommu_guest.c
+++ b/xen/drivers/passthrough/amd/iommu_guest.c
@@ -21,6 +21,13 @@
 #include <asm/amd-iommu.h>
 #include <asm/hvm/svm/amd-iommu-proto.h>
 
+/* Override {get,put}_gfn to work with gfn_t */
+#undef get_gfn
+#define get_gfn(d, g, t) get_gfn_type(d, gfn_x(g), t, P2M_ALLOC)
+#undef get_gfn_query
+#define get_gfn_query(d, g, t) get_gfn_type(d, gfn_x(g), t, 0)
+#undef put_gfn
+#define put_gfn(d, g) __put_gfn(p2m_get_hostp2m(d), gfn_x(g))
 
 #define IOMMU_MMIO_SIZE                         0x8000
 #define IOMMU_MMIO_PAGE_NR                      0x8
@@ -117,13 +124,6 @@ static unsigned int host_domid(struct domain *d, uint64_t g_domid)
     return d->domain_id;
 }
 
-static unsigned long get_gfn_from_base_reg(uint64_t base_raw)
-{
-    base_raw &= PADDR_MASK;
-    ASSERT ( base_raw != 0 );
-    return base_raw >> PAGE_SHIFT;
-}
-
 static void guest_iommu_deliver_msi(struct domain *d)
 {
     uint8_t vector, dest, dest_mode, delivery_mode, trig_mode;
@@ -138,23 +138,6 @@ static void guest_iommu_deliver_msi(struct domain *d)
     vmsi_deliver(d, vector, dest, dest_mode, delivery_mode, trig_mode);
 }
 
-static unsigned long guest_iommu_get_table_mfn(struct domain *d,
-                                               uint64_t base_raw,
-                                               unsigned int entry_size,
-                                               unsigned int pos)
-{
-    unsigned long idx, gfn, mfn;
-    p2m_type_t p2mt;
-
-    gfn = get_gfn_from_base_reg(base_raw);
-    idx = (pos * entry_size) >> PAGE_SHIFT;
-
-    mfn = mfn_x(get_gfn(d, gfn + idx, &p2mt));
-    put_gfn(d, gfn);
-
-    return mfn;
-}
-
 static void guest_iommu_enable_dev_table(struct guest_iommu *iommu)
 {
     uint32_t length_raw = get_field_from_reg_u32(iommu->dev_table.reg_base.lo,
@@ -176,7 +159,10 @@ static void guest_iommu_enable_ring_buffer(struct guest_iommu *iommu,
 void guest_iommu_add_ppr_log(struct domain *d, u32 entry[])
 {
     uint16_t gdev_id;
-    unsigned long mfn, tail, head;
+    unsigned long tail, head;
+    mfn_t mfn;
+    gfn_t gfn;
+    p2m_type_t p2mt;
     ppr_entry_t *log, *log_base;
     struct guest_iommu *iommu;
 
@@ -197,11 +183,24 @@ void guest_iommu_add_ppr_log(struct domain *d, u32 entry[])
         return;
     }
 
-    mfn = guest_iommu_get_table_mfn(d, reg_to_u64(iommu->ppr_log.reg_base),
-                                    sizeof(ppr_entry_t), tail);
-    ASSERT(mfn_valid(_mfn(mfn)));
+    gfn = _gfn(PFN_DOWN(reg_to_u64(iommu->ppr_log.reg_base)) +
+               PFN_DOWN(tail * sizeof(*log)));
 
-    log_base = map_domain_page(_mfn(mfn));
+    mfn = get_gfn(d, gfn, &p2mt);
+    if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_ram(p2mt) )
+    {
+        AMD_IOMMU_DEBUG(
+            "Error: guest iommu ppr log bad gfn %"PRI_gfn", type %u, mfn %"
+            PRI_mfn", reg_base %#"PRIx64", tail %#lx\n",
+            gfn_x(gfn), p2mt, mfn_x(mfn),
+            reg_to_u64(iommu->ppr_log.reg_base), tail);
+        guest_iommu_disable(iommu);
+        goto out;
+    }
+
+    ASSERT(mfn_valid(mfn));
+
+    log_base = map_domain_page(mfn);
     log = log_base + tail % (PAGE_SIZE / sizeof(ppr_entry_t));
 
     /* Convert physical device id back into virtual device id */
@@ -220,12 +219,18 @@ void guest_iommu_add_ppr_log(struct domain *d, u32 entry[])
     unmap_domain_page(log_base);
 
     guest_iommu_deliver_msi(d);
+
+out:
+    put_gfn(d, gfn);
 }
 
 void guest_iommu_add_event_log(struct domain *d, u32 entry[])
 {
     uint16_t dev_id;
-    unsigned long mfn, tail, head;
+    unsigned long tail, head;
+    mfn_t mfn;
+    gfn_t gfn;
+    p2m_type_t p2mt;
     event_entry_t *log, *log_base;
     struct guest_iommu *iommu;
 
@@ -246,11 +251,24 @@ void guest_iommu_add_event_log(struct domain *d, u32 entry[])
         return;
     }
 
-    mfn = guest_iommu_get_table_mfn(d, reg_to_u64(iommu->event_log.reg_base),
-                                    sizeof(event_entry_t), tail);
-    ASSERT(mfn_valid(_mfn(mfn)));
+    gfn = _gfn(PFN_DOWN(reg_to_u64(iommu->event_log.reg_base)) +
+               PFN_DOWN(tail * sizeof(*log)));
+
+    mfn = get_gfn(d, gfn, &p2mt);
+    if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_ram(p2mt) )
+    {
+        AMD_IOMMU_DEBUG(
+            "Error: guest iommu event log bad gfn %"PRI_gfn", type %u, mfn %"
+            PRI_mfn", reg_base %#"PRIx64", tail %#lx\n",
+            gfn_x(gfn), p2mt, mfn_x(mfn),
+            reg_to_u64(iommu->event_log.reg_base), tail);
+        guest_iommu_disable(iommu);
+        goto out;
+    }
+
+    ASSERT(mfn_valid(mfn));
 
-    log_base = map_domain_page(_mfn(mfn));
+    log_base = map_domain_page(mfn);
     log = log_base + tail % (PAGE_SIZE / sizeof(event_entry_t));
 
     /* re-write physical device id into virtual device id */
@@ -269,6 +287,9 @@ void guest_iommu_add_event_log(struct domain *d, u32 entry[])
     unmap_domain_page(log_base);
 
     guest_iommu_deliver_msi(d);
+
+out:
+    put_gfn(d, gfn);
 }
 
 static int do_complete_ppr_request(struct domain *d, cmd_entry_t *cmd)
@@ -346,10 +367,8 @@ static int do_invalidate_iotlb_pages(struct domain *d, cmd_entry_t *cmd)
 
 static int do_completion_wait(struct domain *d, cmd_entry_t *cmd)
 {
-    bool_t com_wait_int_en, com_wait_int, i, s;
+    bool com_wait_int_en, com_wait_int, i, s;
     struct guest_iommu *iommu;
-    unsigned long gfn;
-    p2m_type_t p2mt;
 
     iommu = domain_iommu(d);
 
@@ -362,7 +381,10 @@ static int do_completion_wait(struct domain *d, cmd_entry_t *cmd)
     if ( s )
     {
         uint64_t gaddr_lo, gaddr_hi, gaddr_64, data;
-        void *vaddr;
+        mfn_t mfn;
+        gfn_t gfn;
+        p2m_type_t p2mt;
+        uint64_t *ptr;
 
         data = (uint64_t)cmd->data[3] << 32 | cmd->data[2];
         gaddr_lo = get_field_from_reg_u32(cmd->data[0],
@@ -374,13 +396,24 @@ static int do_completion_wait(struct domain *d, cmd_entry_t *cmd)
 
         gaddr_64 = (gaddr_hi << 32) | (gaddr_lo << 3);
 
-        gfn = gaddr_64 >> PAGE_SHIFT;
-        vaddr = map_domain_page(get_gfn(d, gfn ,&p2mt));
-        put_gfn(d, gfn);
+        gfn = _gfn(gaddr_64 >> PAGE_SHIFT);
+        mfn = get_gfn(d, gfn, &p2mt);
 
-        write_u64_atomic((uint64_t *)(vaddr + (gaddr_64 & (PAGE_SIZE-1))),
-                         data);
-        unmap_domain_page(vaddr);
+        if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_ram(p2mt) )
+        {
+            /* XXX - What to do here, error wise? */
+            guest_iommu_disable(iommu);
+            put_gfn(d, gfn);
+
+            return 0;
+        }
+
+        ptr = map_domain_page(mfn) + (gaddr_64 & ~PAGE_MASK);
+
+        write_u64_atomic(ptr, data);
+        unmap_domain_page(ptr);
+
+        put_gfn(d, gfn);
     }
 
     com_wait_int_en = iommu_get_bit(iommu->reg_ctrl.lo,
@@ -400,9 +433,10 @@ static int do_invalidate_dte(struct domain *d, cmd_entry_t *cmd)
     dev_entry_t *gdte, *mdte, *dte_base;
     struct amd_iommu *iommu = NULL;
     struct guest_iommu *g_iommu;
-    uint64_t gcr3_gfn, gcr3_mfn;
+    mfn_t dte_mfn, gcr3_mfn;
+    gfn_t dte_gfn, gcr3_gfn;
     uint8_t glx, gv;
-    unsigned long dte_mfn, flags;
+    unsigned long flags;
     p2m_type_t p2mt;
 
     g_iommu = domain_iommu(d);
@@ -417,35 +451,49 @@ static int do_invalidate_dte(struct domain *d, cmd_entry_t *cmd)
     if ( (gbdf * sizeof(dev_entry_t)) > g_iommu->dev_table.size )
         return 0;
 
-    dte_mfn = guest_iommu_get_table_mfn(d,
-                                        reg_to_u64(g_iommu->dev_table.reg_base),
-                                        sizeof(dev_entry_t), gbdf);
-    ASSERT(mfn_valid(_mfn(dte_mfn)));
+    dte_gfn = _gfn(PFN_DOWN(reg_to_u64(g_iommu->dev_table.reg_base)) +
+                   PFN_DOWN(gbdf * sizeof(*gdte)));
+    dte_mfn = get_gfn(d, dte_gfn, &p2mt);
+
+    if ( mfn_eq(dte_mfn, INVALID_MFN) || !p2m_is_ram(p2mt) )
+    {
+        put_gfn(d, dte_gfn);
+        return 0;
+    }
+
+    ASSERT(mfn_valid(dte_mfn));
 
     /* Read guest dte information */
-    dte_base = map_domain_page(_mfn(dte_mfn));
+    dte_base = map_domain_page(dte_mfn);
 
     gdte = dte_base + gbdf % (PAGE_SIZE / sizeof(dev_entry_t));
 
     gdom_id  = get_domid_from_dte(gdte);
-    gcr3_gfn = get_guest_cr3_from_dte(gdte);
+    gcr3_gfn = _gfn(get_guest_cr3_from_dte(gdte));
     glx      = get_glx_from_dte(gdte);
     gv       = get_gv_from_dte(gdte);
 
     unmap_domain_page(dte_base);
+    put_gfn(d, dte_gfn);
 
     /* Do not update host dte before gcr3 has been set */
-    if ( gcr3_gfn == 0 )
+    if ( gfn_x(gcr3_gfn) == 0 )
         return 0;
 
-    gcr3_mfn = mfn_x(get_gfn(d, gcr3_gfn, &p2mt));
-    put_gfn(d, gcr3_gfn);
+    gcr3_mfn = get_gfn(d, gcr3_gfn, &p2mt);
 
-    ASSERT(mfn_valid(_mfn(gcr3_mfn)));
+    if ( mfn_eq(gcr3_mfn, INVALID_MFN) || !p2m_is_ram(p2mt) )
+    {
+        put_gfn(d, gcr3_gfn);
+        return 0;
+    }
+
+    ASSERT(mfn_valid(gcr3_mfn));
 
     iommu = find_iommu_for_device(0, mbdf);
     if ( !iommu )
     {
+        put_gfn(d, gcr3_gfn);
         AMD_IOMMU_DEBUG("%s: Fail to find iommu for bdf %x!\n",
                         __func__, mbdf);
         return -ENODEV;
@@ -458,18 +506,19 @@ static int do_invalidate_dte(struct domain *d, cmd_entry_t *cmd)
 
     spin_lock_irqsave(&iommu->lock, flags);
     iommu_dte_set_guest_cr3((u32 *)mdte, hdom_id,
-                            gcr3_mfn << PAGE_SHIFT, gv, glx);
+                            mfn_to_maddr(gcr3_mfn), gv, glx);
 
     amd_iommu_flush_device(iommu, req_id);
     spin_unlock_irqrestore(&iommu->lock, flags);
 
+    put_gfn(d, gcr3_gfn);
+
     return 0;
 }
 
 static void guest_iommu_process_command(unsigned long _d)
 {
-    unsigned long opcode, tail, head, entries_per_page, cmd_mfn;
-    cmd_entry_t *cmd, *cmd_base;
+    unsigned long tail, head;
     struct domain *d = (struct domain *)_d;
     struct guest_iommu *iommu;
 
@@ -493,56 +542,75 @@ static void guest_iommu_process_command(unsigned long _d)
         return;
     }
 
-    entries_per_page = PAGE_SIZE / sizeof(cmd_entry_t);
-
     while ( head != tail )
     {
+        mfn_t mfn;
+        gfn_t gfn;
+        p2m_type_t p2mt;
+        cmd_entry_t cmd, *ptr;
         int ret = 0;
+        unsigned int opcode;
+
+        gfn = _gfn(PFN_DOWN(reg_to_u64(iommu->cmd_buffer.reg_base)) +
+                   PFN_DOWN(head * sizeof(cmd)));
 
-        cmd_mfn = guest_iommu_get_table_mfn(d,
-                                            reg_to_u64(iommu->cmd_buffer.reg_base),
-                                            sizeof(cmd_entry_t), head);
-        ASSERT(mfn_valid(_mfn(cmd_mfn)));
+        mfn = get_gfn(d, gfn, &p2mt);
+        if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_ram(p2mt) )
+        {
+            AMD_IOMMU_DEBUG(
+                "Error: guest iommu cmd buffer bad gfn %"PRI_gfn", type %u, mfn %"
+                PRI_mfn", reg_base %#"PRIx64", head %#lx\n",
+                gfn_x(gfn), p2mt, mfn_x(mfn),
+                reg_to_u64(iommu->cmd_buffer.reg_base), head);
+            put_gfn(d, gfn);
+            guest_iommu_disable(iommu);
+            return;
+        }
 
-        cmd_base = map_domain_page(_mfn(cmd_mfn));
-        cmd = cmd_base + head % entries_per_page;
+        ptr = (cmd_entry_t *)map_domain_page(mfn) + head % (PAGE_SIZE / sizeof(*ptr));
+        memcpy(&cmd, ptr, sizeof(cmd));
+        unmap_domain_page(ptr);
+        put_gfn(d, gfn);
 
-        opcode = get_field_from_reg_u32(cmd->data[1],
+        opcode = get_field_from_reg_u32(cmd.data[1],
                                         IOMMU_CMD_OPCODE_MASK,
                                         IOMMU_CMD_OPCODE_SHIFT);
         switch ( opcode )
         {
         case IOMMU_CMD_COMPLETION_WAIT:
-            ret = do_completion_wait(d, cmd);
+            ret = do_completion_wait(d, &cmd);
             break;
         case IOMMU_CMD_INVALIDATE_DEVTAB_ENTRY:
-            ret = do_invalidate_dte(d, cmd);
+            ret = do_invalidate_dte(d, &cmd);
             break;
         case IOMMU_CMD_INVALIDATE_IOMMU_PAGES:
-            ret = do_invalidate_pages(d, cmd);
+            ret = do_invalidate_pages(d, &cmd);
             break;
         case IOMMU_CMD_INVALIDATE_IOTLB_PAGES:
-            ret = do_invalidate_iotlb_pages(d, cmd);
+            ret = do_invalidate_iotlb_pages(d, &cmd);
             break;
         case IOMMU_CMD_INVALIDATE_INT_TABLE:
             break;
         case IOMMU_CMD_COMPLETE_PPR_REQUEST:
-            ret = do_complete_ppr_request(d, cmd);
+            ret = do_complete_ppr_request(d, &cmd);
             break;
         case IOMMU_CMD_INVALIDATE_IOMMU_ALL:
-            ret = do_invalidate_all(d, cmd);
+            ret = do_invalidate_all(d, &cmd);
             break;
         default:
-            AMD_IOMMU_DEBUG("CMD: Unknown command cmd_type = %lx "
+            AMD_IOMMU_DEBUG("CMD: Unknown command cmd_type = %#x "
                             "head = %ld\n", opcode, head);
+            ret = -EINVAL;
             break;
         }
 
-        unmap_domain_page(cmd_base);
         if ( ++head >= iommu->cmd_buffer.entries )
             head = 0;
         if ( ret )
+        {
             guest_iommu_disable(iommu);
+            break;
+        }
     }
 
     /* Now shift cmd buffer head pointer */
@@ -818,10 +886,10 @@ int guest_iommu_set_base(struct domain *d, uint64_t base)
 
     for ( int i = 0; i < IOMMU_MMIO_PAGE_NR; i++ )
     {
-        unsigned long gfn = base + i;
+        gfn_t gfn = _gfn(base + i);
 
         get_gfn_query(d, gfn, &t);
-        p2m_change_type_one(d, gfn, t, p2m_mmio_dm);
+        p2m_change_type_one(d, gfn_x(gfn), t, p2m_mmio_dm);
         put_gfn(d, gfn);
     }
 
-- 
2.1.4

