From: Tamas K Lengyel <tamas.lengyel@intel.com>
To: xen-devel@lists.xenproject.org
Cc: "Tamas K Lengyel" <tamas@tklengyel.com>,
	"Tamas K Lengyel" <tamas.lengyel@intel.com>,
	"Wei Liu" <wl@xen.org>,
	"George Dunlap" <george.dunlap@eu.citrix.com>,
	"Andrew Cooper" <andrew.cooper3@citrix.com>,
	"Jan Beulich" <jbeulich@suse.com>,
	"Roger Pau Monné" <roger.pau@citrix.com>
Subject: [Xen-devel] [PATCH v2 04/20] x86/mem_sharing: cleanup code and comments in various locations
Date: Wed, 18 Dec 2019 11:40:41 -0800
Message-ID: <cacdc0909f85b78b69c07ba5230b84ceae82954a.1576697796.git.tamas.lengyel@intel.com>
In-Reply-To: <cover.1576697796.git.tamas.lengyel@intel.com>

No functional changes.

Signed-off-by: Tamas K Lengyel <tamas.lengyel@intel.com>
---
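A note for reviewers on the reverse map (rmap) touched below: each shared
frame tracks its <domain, gfn> backers in a linked list while sharing is
light, and converts to a hash table once the entry count crosses
RMAP_HEAVY_SHARED_PAGE, converting back only below the lower
RMAP_LIGHT_SHARED_PAGE threshold. The following is a minimal standalone C
sketch of that two-threshold (hysteresis) idea only -- the names and values
here are made up, it is not the hypervisor code:

    #include <stdbool.h>

    #define HEAVY 256          /* hypothetical; cf. RMAP_HEAVY_SHARED_PAGE */
    #define LIGHT (HEAVY >> 2) /* hypothetical; cf. RMAP_LIGHT_SHARED_PAGE */

    struct rmap_state {
        unsigned long count;   /* number of <domain, gfn> entries */
        bool uses_hashtab;     /* false: linked list, true: hash table */
    };

    static void entry_added(struct rmap_state *r)
    {
        if ( ++r->count >= HEAVY && !r->uses_hashtab )
            r->uses_hashtab = true;    /* list -> hash table */
    }

    static void entry_removed(struct rmap_state *r)
    {
        /*
         * Only converting back below the *lower* threshold is the
         * hysteresis: it prevents flapping between the two representations
         * when the count oscillates around a single threshold.
         */
        if ( --r->count <= LIGHT && r->uses_hashtab )
            r->uses_hashtab = false;   /* hash table -> list */
    }
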
 xen/arch/x86/hvm/hvm.c            |  11 +-
 xen/arch/x86/mm/mem_sharing.c     | 342 +++++++++++++++++-------------
 xen/arch/x86/mm/p2m.c             |  17 +-
 xen/include/asm-x86/mem_sharing.h |  51 +++--
 4 files changed, 236 insertions(+), 185 deletions(-)
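
Several reflowed comments also reference the locking discipline: sharing may
need to lock two pages at once, and deadlock is avoided by always taking the
locks in increasing order. Below is a hedged sketch of just that ordering
rule; lock_page() is a hypothetical stand-in, while the real
mem_sharing_page_lock() can fail and is additionally ordered against the p2m
lock via mm-locks.h, both of which this ignores:

    struct page_info;                    /* opaque for this sketch */
    void lock_page(struct page_info *p); /* hypothetical stand-in */

    /*
     * Lock two pages in a fixed global order (here: by address) so that two
     * CPUs locking the same pair can never each hold one lock while waiting
     * for the other.
     */
    static void lock_pair_in_order(struct page_info *a, struct page_info *b)
    {
        if ( b < a )
        {
            struct page_info *t = a;
            a = b;
            b = t;
        }
        lock_page(a);
        if ( b != a )
            lock_page(b);
    }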

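Lastly, get_next_handle() (only whitespace-touched below) allocates sharing
handles locklessly with a compare-and-swap retry loop. A standalone C11
rendering of the same pattern, using <stdatomic.h> purely for illustration
(Xen uses its own cmpxchg() on a plain uint64_t):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint64_t next_handle = 1;

    static uint64_t get_next_handle_sketch(void)
    {
        uint64_t x, y = atomic_load(&next_handle);

        do {
            x = y;    /* snapshot; retry from the fresh value on failure */
        } while ( !atomic_compare_exchange_weak(&next_handle, &y, x + 1) );

        return x + 1; /* the value we successfully installed */
    }
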
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 5a3a962fbb..1e888b403b 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1902,12 +1902,11 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
     if ( npfec.write_access && (p2mt == p2m_ram_shared) )
     {
         ASSERT(p2m_is_hostp2m(p2m));
-        sharing_enomem = 
-            (mem_sharing_unshare_page(currd, gfn, 0) < 0);
+        sharing_enomem = mem_sharing_unshare_page(currd, gfn, 0);
         rc = 1;
         goto out_put_gfn;
     }
- 
+
     /* Spurious fault? PoD and log-dirty also take this path. */
     if ( p2m_is_ram(p2mt) )
     {
@@ -1953,9 +1952,11 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
         __put_gfn(p2m, gfn);
     __put_gfn(hostp2m, gfn);
  out:
-    /* All of these are delayed until we exit, since we might 
+    /*
+     * All of these are delayed until we exit, since we might
      * sleep on event ring wait queues, and we must not hold
-     * locks in such circumstance */
+     * locks in such circumstances.
+     */
     if ( paged )
         p2m_mem_paging_populate(currd, gfn);
     if ( sharing_enomem )
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index efb8821768..319aaf3074 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -59,8 +59,10 @@ static DEFINE_PER_CPU(pg_lock_data_t, __pld);
 #define RMAP_USES_HASHTAB(page) \
         ((page)->sharing->hash_table.flag == NULL)
 #define RMAP_HEAVY_SHARED_PAGE   RMAP_HASHTAB_SIZE
-/* A bit of hysteresis. We don't want to be mutating between list and hash
- * table constantly. */
+/*
+ * A bit of hysteresis. We don't want to be mutating between list and hash
+ * table constantly.
+ */
 #define RMAP_LIGHT_SHARED_PAGE   (RMAP_HEAVY_SHARED_PAGE >> 2)
 
 #if MEM_SHARING_AUDIT
@@ -88,7 +90,7 @@ static inline void page_sharing_dispose(struct page_info *page)
 {
     /* Unlikely given our thresholds, but we should be careful. */
     if ( unlikely(RMAP_USES_HASHTAB(page)) )
-        free_xenheap_pages(page->sharing->hash_table.bucket, 
+        free_xenheap_pages(page->sharing->hash_table.bucket,
                             RMAP_HASHTAB_ORDER);
 
     spin_lock(&shr_audit_lock);
@@ -105,7 +107,7 @@ static inline void page_sharing_dispose(struct page_info *page)
 {
     /* Unlikely given our thresholds, but we should be careful. */
     if ( unlikely(RMAP_USES_HASHTAB(page)) )
-        free_xenheap_pages(page->sharing->hash_table.bucket, 
+        free_xenheap_pages(page->sharing->hash_table.bucket,
                             RMAP_HASHTAB_ORDER);
     xfree(page->sharing);
 }
@@ -122,8 +124,8 @@ static inline void page_sharing_dispose(struct page_info *page)
  * Nesting may happen when sharing (and locking) two pages.
  * Deadlock is avoided by locking pages in increasing order.
  * All memory sharing code paths take the p2m lock of the affected gfn before
- * taking the lock for the underlying page. We enforce ordering between page_lock
- * and p2m_lock using an mm-locks.h construct.
+ * taking the lock for the underlying page. We enforce ordering between
+ * page_lock and p2m_lock using an mm-locks.h construct.
  *
  * TODO: Investigate if PGT_validated is necessary.
  */
@@ -168,7 +170,7 @@ static inline bool mem_sharing_page_lock(struct page_info *pg)
     if ( rc )
     {
         preempt_disable();
-        page_sharing_mm_post_lock(&pld->mm_unlock_level, 
+        page_sharing_mm_post_lock(&pld->mm_unlock_level,
                                   &pld->recurse_count);
     }
     return rc;
@@ -178,7 +180,7 @@ static inline void mem_sharing_page_unlock(struct page_info *pg)
 {
     pg_lock_data_t *pld = &(this_cpu(__pld));
 
-    page_sharing_mm_unlock(pld->mm_unlock_level, 
+    page_sharing_mm_unlock(pld->mm_unlock_level,
                            &pld->recurse_count);
     preempt_enable();
     _page_unlock(pg);
@@ -186,7 +188,7 @@ static inline void mem_sharing_page_unlock(struct page_info *pg)
 
 static inline shr_handle_t get_next_handle(void)
 {
-    /* Get the next handle get_page style */ 
+    /* Get the next handle get_page style */
     uint64_t x, y = next_handle;
     do {
         x = y;
@@ -198,24 +200,26 @@ static inline shr_handle_t get_next_handle(void)
 #define mem_sharing_enabled(d) \
     (is_hvm_domain(d) && (d)->arch.hvm.mem_sharing_enabled)
 
-static atomic_t nr_saved_mfns   = ATOMIC_INIT(0); 
+static atomic_t nr_saved_mfns   = ATOMIC_INIT(0);
 static atomic_t nr_shared_mfns  = ATOMIC_INIT(0);
 
-/** Reverse map **/
-/* Every shared frame keeps a reverse map (rmap) of <domain, gfn> tuples that
+/*
+ * Reverse map
+ *
+ * Every shared frame keeps a reverse map (rmap) of <domain, gfn> tuples that
  * this shared frame backs. For pages with a low degree of sharing, a O(n)
  * search linked list is good enough. For pages with higher degree of sharing,
- * we use a hash table instead. */
+ * we use a hash table instead.
+ */
 
 typedef struct gfn_info
 {
     unsigned long gfn;
-    domid_t domain; 
+    domid_t domain;
     struct list_head list;
 } gfn_info_t;
 
-static inline void
-rmap_init(struct page_info *page)
+static inline void rmap_init(struct page_info *page)
 {
     /* We always start off as a doubly linked list. */
     INIT_LIST_HEAD(&page->sharing->gfns);
@@ -225,10 +229,11 @@ rmap_init(struct page_info *page)
 #define HASH(domain, gfn)       \
     (((gfn) + (domain)) % RMAP_HASHTAB_SIZE)
 
-/* Conversions. Tuned by the thresholds. Should only happen twice 
- * (once each) during the lifetime of a shared page */
-static inline int
-rmap_list_to_hash_table(struct page_info *page)
+/*
+ * Conversions. Tuned by the thresholds. Should only happen twice
+ * (once each) during the lifetime of a shared page.
+ */
+static inline int rmap_list_to_hash_table(struct page_info *page)
 {
     unsigned int i;
     struct list_head *pos, *tmp, *b =
@@ -254,8 +259,7 @@ rmap_list_to_hash_table(struct page_info *page)
     return 0;
 }
 
-static inline void
-rmap_hash_table_to_list(struct page_info *page)
+static inline void rmap_hash_table_to_list(struct page_info *page)
 {
     unsigned int i;
     struct list_head *bucket = page->sharing->hash_table.bucket;
@@ -276,8 +280,7 @@ rmap_hash_table_to_list(struct page_info *page)
 }
 
 /* Generic accessors to the rmap */
-static inline unsigned long
-rmap_count(struct page_info *pg)
+static inline unsigned long rmap_count(struct page_info *pg)
 {
     unsigned long count;
     unsigned long t = read_atomic(&pg->u.inuse.type_info);
@@ -287,11 +290,13 @@ rmap_count(struct page_info *pg)
     return count;
 }
 
-/* The page type count is always decreased after removing from the rmap.
- * Use a convert flag to avoid mutating the rmap if in the middle of an 
- * iterator, or if the page will be soon destroyed anyways. */
-static inline void
-rmap_del(gfn_info_t *gfn_info, struct page_info *page, int convert)
+/*
+ * The page type count is always decreased after removing from the rmap.
+ * Use a convert flag to avoid mutating the rmap if in the middle of an
+ * iterator, or if the page will soon be destroyed anyway.
+ */
+static inline
+void rmap_del(gfn_info_t *gfn_info, struct page_info *page, int convert)
 {
     if ( RMAP_USES_HASHTAB(page) && convert &&
          (rmap_count(page) <= RMAP_LIGHT_SHARED_PAGE) )
@@ -302,8 +307,7 @@ rmap_del(gfn_info_t *gfn_info, struct page_info *page, int convert)
 }
 
 /* The page type count is always increased before adding to the rmap. */
-static inline void
-rmap_add(gfn_info_t *gfn_info, struct page_info *page)
+static inline void rmap_add(gfn_info_t *gfn_info, struct page_info *page)
 {
     struct list_head *head;
 
@@ -314,7 +318,7 @@ rmap_add(gfn_info_t *gfn_info, struct page_info *page)
         (void)rmap_list_to_hash_table(page);
 
     head = (RMAP_USES_HASHTAB(page)) ?
-        page->sharing->hash_table.bucket + 
+        page->sharing->hash_table.bucket +
                             HASH(gfn_info->domain, gfn_info->gfn) :
         &page->sharing->gfns;
 
@@ -322,9 +326,9 @@ rmap_add(gfn_info_t *gfn_info, struct page_info *page)
     list_add(&gfn_info->list, head);
 }
 
-static inline gfn_info_t *
-rmap_retrieve(uint16_t domain_id, unsigned long gfn, 
-                            struct page_info *page)
+static inline
+gfn_info_t *rmap_retrieve(uint16_t domain_id, unsigned long gfn,
+                          struct page_info *page)
 {
     gfn_info_t *gfn_info;
     struct list_head *le, *head;
@@ -364,18 +368,18 @@ struct rmap_iterator {
     unsigned int bucket;
 };
 
-static inline void
-rmap_seed_iterator(struct page_info *page, struct rmap_iterator *ri)
+static inline
+void rmap_seed_iterator(struct page_info *page, struct rmap_iterator *ri)
 {
     ri->curr = (RMAP_USES_HASHTAB(page)) ?
                 page->sharing->hash_table.bucket :
                 &page->sharing->gfns;
-    ri->next = ri->curr->next; 
+    ri->next = ri->curr->next;
     ri->bucket = 0;
 }
 
-static inline gfn_info_t *
-rmap_iterate(struct page_info *page, struct rmap_iterator *ri)
+static inline
+gfn_info_t *rmap_iterate(struct page_info *page, struct rmap_iterator *ri)
 {
     struct list_head *head = (RMAP_USES_HASHTAB(page)) ?
                 page->sharing->hash_table.bucket + ri->bucket :
@@ -405,14 +409,14 @@ retry:
     return list_entry(ri->curr, gfn_info_t, list);
 }
 
-static inline gfn_info_t *mem_sharing_gfn_alloc(struct page_info *page,
-                                                struct domain *d,
-                                                unsigned long gfn)
+static inline
+gfn_info_t *mem_sharing_gfn_alloc(struct page_info *page, struct domain *d,
+                                  unsigned long gfn)
 {
     gfn_info_t *gfn_info = xmalloc(gfn_info_t);
 
     if ( gfn_info == NULL )
-        return NULL; 
+        return NULL;
 
     gfn_info->gfn = gfn;
     gfn_info->domain = d->domain_id;
@@ -425,9 +429,9 @@ static inline gfn_info_t *mem_sharing_gfn_alloc(struct page_info *page,
     return gfn_info;
 }
 
-static inline void mem_sharing_gfn_destroy(struct page_info *page,
-                                           struct domain *d,
-                                           gfn_info_t *gfn_info)
+static inline
+void mem_sharing_gfn_destroy(struct page_info *page, struct domain *d,
+                             gfn_info_t *gfn_info)
 {
     /* Decrement the number of pages. */
     atomic_dec(&d->shr_pages);
@@ -437,25 +441,29 @@ static inline void mem_sharing_gfn_destroy(struct page_info *page,
     xfree(gfn_info);
 }
 
-static struct page_info* mem_sharing_lookup(unsigned long mfn)
+static inline struct page_info* mem_sharing_lookup(unsigned long mfn)
 {
-    if ( mfn_valid(_mfn(mfn)) )
-    {
-        struct page_info* page = mfn_to_page(_mfn(mfn));
-        if ( page_get_owner(page) == dom_cow )
-        {
-            /* Count has to be at least two, because we're called
-             * with the mfn locked (1) and this is supposed to be 
-             * a shared page (1). */
-            unsigned long t = read_atomic(&page->u.inuse.type_info);
-            ASSERT((t & PGT_type_mask) == PGT_shared_page);
-            ASSERT((t & PGT_count_mask) >= 2);
-            ASSERT(SHARED_M2P(get_gpfn_from_mfn(mfn)));
-            return page;
-        }
-    }
+    struct page_info* page;
+    unsigned long t;
 
-    return NULL;
+    if ( !mfn_valid(_mfn(mfn)) )
+        return NULL;
+
+    page = mfn_to_page(_mfn(mfn));
+    if ( page_get_owner(page) != dom_cow )
+        return NULL;
+
+    /*
+     * Count has to be at least two, because we're called
+     * with the mfn locked (1) and this is supposed to be
+     * a shared page (1).
+     */
+    t = read_atomic(&page->u.inuse.type_info);
+    ASSERT((t & PGT_type_mask) == PGT_shared_page);
+    ASSERT((t & PGT_count_mask) >= 2);
+    ASSERT(SHARED_M2P(get_gpfn_from_mfn(mfn)));
+
+    return page;
 }
 
 static int audit(void)
@@ -492,7 +500,7 @@ static int audit(void)
            continue;
         }
 
-        /* Check if the MFN has correct type, owner and handle. */ 
+        /* Check if the MFN has correct type, owner and handle. */
         if ( (pg->u.inuse.type_info & PGT_type_mask) != PGT_shared_page )
         {
            MEM_SHARING_DEBUG("mfn %lx in audit list, but not PGT_shared_page (%lx)!\n",
@@ -545,7 +553,7 @@ static int audit(void)
                 errors++;
                 continue;
             }
-            o_mfn = get_gfn_query_unlocked(d, g->gfn, &t); 
+            o_mfn = get_gfn_query_unlocked(d, g->gfn, &t);
             if ( !mfn_eq(o_mfn, mfn) )
             {
                 MEM_SHARING_DEBUG("Incorrect P2M for d=%hu, PFN=%lx."
@@ -568,7 +576,7 @@ static int audit(void)
         {
             MEM_SHARING_DEBUG("Mismatched counts for MFN=%lx."
                               "nr_gfns in list %lu, in type_info %lx\n",
-                              mfn_x(mfn), nr_gfns, 
+                              mfn_x(mfn), nr_gfns,
                               (pg->u.inuse.type_info & PGT_count_mask));
             errors++;
         }
@@ -603,7 +611,7 @@ int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
         .u.mem_sharing.p2mt = p2m_ram_shared
     };
 
-    if ( (rc = __vm_event_claim_slot(d, 
+    if ( (rc = __vm_event_claim_slot(d,
                         d->vm_event_share, allow_sleep)) < 0 )
         return rc;
 
@@ -629,9 +637,9 @@ unsigned int mem_sharing_get_nr_shared_mfns(void)
 }
 
 /* Functions that change a page's type and ownership */
-static int page_make_sharable(struct domain *d, 
-                       struct page_info *page, 
-                       int expected_refcnt)
+static int page_make_sharable(struct domain *d,
+                              struct page_info *page,
+                              int expected_refcnt)
 {
     bool_t drop_dom_ref;
 
@@ -658,8 +666,10 @@ static int page_make_sharable(struct domain *d,
         return -EEXIST;
     }
 
-    /* Check if the ref count is 2. The first from PGC_allocated, and
-     * the second from get_page_and_type at the top of this function */
+    /*
+     * Check if the ref count is 2. The first from PGC_allocated, and
+     * the second from get_page_and_type at the top of this function.
+     */
     if ( page->count_info != (PGC_allocated | (2 + expected_refcnt)) )
     {
         spin_unlock(&d->page_alloc_lock);
@@ -675,6 +685,7 @@ static int page_make_sharable(struct domain *d,
 
     if ( drop_dom_ref )
         put_domain(d);
+
     return 0;
 }
 
@@ -684,7 +695,7 @@ static int page_make_private(struct domain *d, struct page_info *page)
 
     if ( !get_page(page, dom_cow) )
         return -EINVAL;
-    
+
     spin_lock(&d->page_alloc_lock);
 
     if ( d->is_dying )
@@ -727,10 +738,13 @@ static inline struct page_info *__grab_shared_page(mfn_t mfn)
 
     if ( !mfn_valid(mfn) )
         return NULL;
+
     pg = mfn_to_page(mfn);
 
-    /* If the page is not validated we can't lock it, and if it's  
-     * not validated it's obviously not shared. */
+    /*
+     * If the page is not validated we can't lock it, and if it's
+     * not validated it's obviously not shared.
+     */
     if ( !mem_sharing_page_lock(pg) )
         return NULL;
 
@@ -754,10 +768,10 @@ static int debug_mfn(mfn_t mfn)
         return -EINVAL;
     }
 
-    MEM_SHARING_DEBUG( 
+    MEM_SHARING_DEBUG(
             "Debug page: MFN=%lx is ci=%lx, ti=%lx, owner_id=%d\n",
-            mfn_x(page_to_mfn(page)), 
-            page->count_info, 
+            mfn_x(page_to_mfn(page)),
+            page->count_info,
             page->u.inuse.type_info,
             page_get_owner(page)->domain_id);
 
@@ -775,7 +789,7 @@ static int debug_gfn(struct domain *d, gfn_t gfn)
 
     mfn = get_gfn_query(d, gfn_x(gfn), &p2mt);
 
-    MEM_SHARING_DEBUG("Debug for dom%d, gfn=%" PRI_gfn "\n", 
+    MEM_SHARING_DEBUG("Debug for dom%d, gfn=%" PRI_gfn "\n",
                       d->domain_id, gfn_x(gfn));
     num_refs = debug_mfn(mfn);
     put_gfn(d, gfn_x(gfn));
@@ -796,9 +810,9 @@ static int debug_gref(struct domain *d, grant_ref_t ref)
                           d->domain_id, ref, rc);
         return rc;
     }
-    
+
     MEM_SHARING_DEBUG(
-            "==> Grant [dom=%d,ref=%d], status=%x. ", 
+            "==> Grant [dom=%d,ref=%d], status=%x. ",
             d->domain_id, ref, status);
 
     return debug_gfn(d, gfn);
@@ -824,15 +838,12 @@ static int nominate_page(struct domain *d, gfn_t gfn,
         goto out;
 
     /* Return the handle if the page is already shared */
-    if ( p2m_is_shared(p2mt) ) {
+    if ( p2m_is_shared(p2mt) )
+    {
         struct page_info *pg = __grab_shared_page(mfn);
         if ( !pg )
-        {
-            gprintk(XENLOG_ERR,
-                    "Shared p2m entry gfn %" PRI_gfn ", but could not grab mfn %" PRI_mfn " dom%d\n",
-                    gfn_x(gfn), mfn_x(mfn), d->domain_id);
             BUG();
-        }
+
         *phandle = pg->sharing->handle;
         ret = 0;
         mem_sharing_page_unlock(pg);
@@ -843,7 +854,6 @@ static int nominate_page(struct domain *d, gfn_t gfn,
     if ( !p2m_is_sharable(p2mt) )
         goto out;
 
-#ifdef CONFIG_HVM
     /* Check if there are mem_access/remapped altp2m entries for this page */
     if ( altp2m_active(d) )
     {
@@ -872,42 +882,42 @@ static int nominate_page(struct domain *d, gfn_t gfn,
 
         altp2m_list_unlock(d);
     }
-#endif
 
     /* Try to convert the mfn to the sharable type */
     page = mfn_to_page(mfn);
-    ret = page_make_sharable(d, page, expected_refcnt); 
-    if ( ret ) 
+    ret = page_make_sharable(d, page, expected_refcnt);
+    if ( ret )
         goto out;
 
-    /* Now that the page is validated, we can lock it. There is no 
-     * race because we're holding the p2m entry, so no one else 
-     * could be nominating this gfn */
+    /*
+     * Now that the page is validated, we can lock it. There is no
+     * race because we're holding the p2m entry, so no one else
+     * could be nominating this gfn.
+     */
     ret = -ENOENT;
     if ( !mem_sharing_page_lock(page) )
         goto out;
 
     /* Initialize the shared state */
     ret = -ENOMEM;
-    if ( (page->sharing = 
-            xmalloc(struct page_sharing_info)) == NULL )
+    if ( !(page->sharing = xmalloc(struct page_sharing_info)) )
     {
         /* Making a page private atomically unlocks it */
-        BUG_ON(page_make_private(d, page) != 0);
+        BUG_ON(page_make_private(d, page));
         goto out;
     }
     page->sharing->pg = page;
     rmap_init(page);
 
     /* Create the handle */
-    page->sharing->handle = get_next_handle();  
+    page->sharing->handle = get_next_handle();
 
     /* Create the local gfn info */
-    if ( mem_sharing_gfn_alloc(page, d, gfn_x(gfn)) == NULL )
+    if ( !mem_sharing_gfn_alloc(page, d, gfn_x(gfn)) )
     {
         xfree(page->sharing);
         page->sharing = NULL;
-        BUG_ON(page_make_private(d, page) != 0);
+        BUG_ON(page_make_private(d, page));
         goto out;
     }
 
@@ -946,15 +956,19 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
     get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
                  cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg);
 
-    /* This tricky business is to avoid two callers deadlocking if 
-     * grabbing pages in opposite client/source order */
+    /*
+     * This tricky business is to avoid two callers deadlocking if
+     * grabbing pages in opposite client/source order.
+     */
     if ( mfn_eq(smfn, cmfn) )
     {
-        /* The pages are already the same.  We could return some
+        /*
+         * The pages are already the same.  We could return some
          * kind of error here, but no matter how you look at it,
          * the pages are already 'shared'.  It possibly represents
          * a big problem somewhere else, but as far as sharing is
-         * concerned: great success! */
+         * concerned: great success!
+         */
         ret = 0;
         goto err_out;
     }
@@ -1010,11 +1024,15 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
     rmap_seed_iterator(cpage, &ri);
     while ( (gfn = rmap_iterate(cpage, &ri)) != NULL)
     {
-        /* Get the source page and type, this should never fail: 
-         * we are under shr lock, and got a successful lookup */
+        /*
+         * Get the source page and type, this should never fail:
+         * we are under shr lock, and got a successful lookup.
+         */
         BUG_ON(!get_page_and_type(spage, dom_cow, PGT_shared_page));
-        /* Move the gfn_info from client list to source list.
-         * Don't change the type of rmap for the client page. */
+        /*
+         * Move the gfn_info from client list to source list.
+         * Don't change the type of rmap for the client page.
+         */
         rmap_del(gfn, cpage, 0);
         rmap_add(gfn, spage);
         put_count++;
@@ -1043,14 +1061,14 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
     atomic_dec(&nr_shared_mfns);
     atomic_inc(&nr_saved_mfns);
     ret = 0;
-    
+
 err_out:
     put_two_gfns(&tg);
     return ret;
 }
 
 int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle_t sh,
-                            struct domain *cd, unsigned long cgfn) 
+                               struct domain *cd, unsigned long cgfn)
 {
     struct page_info *spage;
     int ret = -EINVAL;
@@ -1069,15 +1087,18 @@ int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle
     spage = __grab_shared_page(smfn);
     if ( spage == NULL )
         goto err_out;
+
     ASSERT(smfn_type == p2m_ram_shared);
 
     /* Check that the handles match */
     if ( spage->sharing->handle != sh )
         goto err_unlock;
 
-    /* Make sure the target page is a hole in the physmap. These are typically
+    /*
+     * Make sure the target page is a hole in the physmap. These are typically
      * p2m_mmio_dm, but also accept p2m_invalid and paged out pages. See the
-     * definition of p2m_is_hole in p2m.h. */
+     * definition of p2m_is_hole in p2m.h.
+     */
     if ( !p2m_is_hole(cmfn_type) )
     {
         ret = XENMEM_SHARING_OP_C_HANDLE_INVALID;
@@ -1086,7 +1107,7 @@ int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle
 
     /* This is simpler than regular sharing */
     BUG_ON(!get_page_and_type(spage, dom_cow, PGT_shared_page));
-    if ( (gfn_info = mem_sharing_gfn_alloc(spage, cd, cgfn)) == NULL )
+    if ( !(gfn_info = mem_sharing_gfn_alloc(spage, cd, cgfn)) )
     {
         put_page_and_type(spage);
         ret = -ENOMEM;
@@ -1102,11 +1123,17 @@ int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle
         mem_sharing_gfn_destroy(spage, cd, gfn_info);
         put_page_and_type(spage);
     } else {
-        /* There is a chance we're plugging a hole where a paged out page was */
+        /*
+         * There is a chance we're plugging a hole where a paged out
+         * page was.
+         */
         if ( p2m_is_paging(cmfn_type) && (cmfn_type != p2m_ram_paging_out) )
         {
             atomic_dec(&cd->paged_pages);
-            /* Further, there is a chance this was a valid page. Don't leak it. */
+            /*
+             * Further, there is a chance this was a valid page.
+             * Don't leak it.
+             */
             if ( mfn_valid(cmfn) )
             {
                 struct page_info *cpage = mfn_to_page(cmfn);
@@ -1133,13 +1160,14 @@ err_out:
 }
 
 
-/* A note on the rationale for unshare error handling:
+/*
+ * A note on the rationale for unshare error handling:
  *  1. Unshare can only fail with ENOMEM. Any other error conditions BUG_ON()'s
  *  2. We notify a potential dom0 helper through a vm_event ring. But we
- *     allow the notification to not go to sleep. If the event ring is full 
+ *     allow the notification to not go to sleep. If the event ring is full
  *     of ENOMEM warnings, then it's on the ball.
  *  3. We cannot go to sleep until the unshare is resolved, because we might
- *     be buried deep into locks (e.g. something -> copy_to_user -> __hvm_copy) 
+ *     be buried deep into locks (e.g. something -> copy_to_user -> __hvm_copy)
  *  4. So, we make sure we:
  *     4.1. return an error
  *     4.2. do not corrupt shared memory
@@ -1147,19 +1175,20 @@ err_out:
  *     4.4. let the guest deal with it if the error propagation will reach it
  */
 int __mem_sharing_unshare_page(struct domain *d,
-                             unsigned long gfn, 
-                             uint16_t flags)
+                               unsigned long gfn,
+                               uint16_t flags)
 {
     p2m_type_t p2mt;
     mfn_t mfn;
     struct page_info *page, *old_page;
     int last_gfn;
     gfn_info_t *gfn_info = NULL;
-   
+
     mfn = get_gfn(d, gfn, &p2mt);
-    
+
     /* Has someone already unshared it? */
-    if ( !p2m_is_shared(p2mt) ) {
+    if ( !p2m_is_shared(p2mt) )
+    {
         put_gfn(d, gfn);
         return 0;
     }
@@ -1167,26 +1196,30 @@ int __mem_sharing_unshare_page(struct domain *d,
     page = __grab_shared_page(mfn);
     if ( page == NULL )
     {
-        gdprintk(XENLOG_ERR, "Domain p2m is shared, but page is not: "
-                                "%lx\n", gfn);
+        gdprintk(XENLOG_ERR, "Domain p2m is shared, but page is not: %lx\n",
+                 gfn);
         BUG();
     }
 
     gfn_info = rmap_retrieve(d->domain_id, gfn, page);
     if ( unlikely(gfn_info == NULL) )
     {
-        gdprintk(XENLOG_ERR, "Could not find gfn_info for shared gfn: "
-                                "%lx\n", gfn);
+        gdprintk(XENLOG_ERR, "Could not find gfn_info for shared gfn: %lx\n",
+                 gfn);
         BUG();
     }
 
-    /* Do the accounting first. If anything fails below, we have bigger
-     * bigger fish to fry. First, remove the gfn from the list. */ 
+    /*
+     * Do the accounting first. If anything fails below, we have
+     * bigger fish to fry. First, remove the gfn from the list.
+     */
     last_gfn = rmap_has_one_entry(page);
     if ( last_gfn )
     {
-        /* Clean up shared state. Get rid of the <domid, gfn> tuple
-         * before destroying the rmap. */
+        /*
+         * Clean up shared state. Get rid of the <domid, gfn> tuple
+         * before destroying the rmap.
+         */
         mem_sharing_gfn_destroy(page, d, gfn_info);
         page_sharing_dispose(page);
         page->sharing = NULL;
@@ -1195,8 +1228,10 @@ int __mem_sharing_unshare_page(struct domain *d,
     else
         atomic_dec(&nr_saved_mfns);
 
-    /* If the GFN is getting destroyed drop the references to MFN 
-     * (possibly freeing the page), and exit early */
+    /*
+     * If the GFN is getting destroyed drop the references to MFN
+     * (possibly freeing the page), and exit early.
+     */
     if ( flags & MEM_SHARING_DESTROY_GFN )
     {
         if ( !last_gfn )
@@ -1212,7 +1247,7 @@ int __mem_sharing_unshare_page(struct domain *d,
 
         return 0;
     }
- 
+
     if ( last_gfn )
     {
         /* Making a page private atomically unlocks it */
@@ -1222,14 +1257,16 @@ int __mem_sharing_unshare_page(struct domain *d,
 
     old_page = page;
     page = alloc_domheap_page(d, 0);
-    if ( !page ) 
+    if ( !page )
     {
         /* Undo dec of nr_saved_mfns, as the retry will decrease again. */
         atomic_inc(&nr_saved_mfns);
         mem_sharing_page_unlock(old_page);
         put_gfn(d, gfn);
-        /* Caller is responsible for placing an event
-         * in the ring */
+        /*
+         * Caller is responsible for placing an event
+         * in the ring.
+         */
         return -ENOMEM;
     }
 
@@ -1240,11 +1277,11 @@ int __mem_sharing_unshare_page(struct domain *d,
     mem_sharing_page_unlock(old_page);
     put_page_and_type(old_page);
 
-private_page_found:    
+ private_page_found:
     if ( p2m_change_type_one(d, gfn, p2m_ram_shared, p2m_ram_rw) )
     {
-        gdprintk(XENLOG_ERR, "Could not change p2m type d %hu gfn %lx.\n", 
-                                d->domain_id, gfn);
+        gdprintk(XENLOG_ERR, "Could not change p2m type d %hu gfn %lx.\n",
+                 d->domain_id, gfn);
         BUG();
     }
 
@@ -1277,20 +1314,23 @@ int relinquish_shared_pages(struct domain *d)
         mfn_t mfn;
         int set_rc;
 
-        if ( atomic_read(&d->shr_pages) == 0 )
+        if ( !atomic_read(&d->shr_pages) )
             break;
+
         mfn = p2m->get_entry(p2m, _gfn(gfn), &t, &a, 0, NULL, NULL);
-        if ( mfn_valid(mfn) && (t == p2m_ram_shared) )
+        if ( mfn_valid(mfn) && t == p2m_ram_shared )
         {
             /* Does not fail with ENOMEM given the DESTROY flag */
-            BUG_ON(__mem_sharing_unshare_page(d, gfn, 
-                    MEM_SHARING_DESTROY_GFN));
-            /* Clear out the p2m entry so no one else may try to
+            BUG_ON(__mem_sharing_unshare_page(d, gfn,
+                   MEM_SHARING_DESTROY_GFN));
+            /*
+             * Clear out the p2m entry so no one else may try to
              * unshare.  Must succeed: we just read the old entry and
-             * we hold the p2m lock. */
+             * we hold the p2m lock.
+             */
             set_rc = p2m->set_entry(p2m, _gfn(gfn), _mfn(0), PAGE_ORDER_4K,
                                     p2m_invalid, p2m_access_rwx, -1);
-            ASSERT(set_rc == 0);
+            ASSERT(!set_rc);
             count += 0x10;
         }
         else
@@ -1454,7 +1494,7 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
 
             if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.source_gfn) )
             {
-                grant_ref_t gref = (grant_ref_t) 
+                grant_ref_t gref = (grant_ref_t)
                                     (XENMEM_SHARING_OP_FIELD_GET_GREF(
                                         mso.u.share.source_gfn));
                 rc = mem_sharing_gref_to_gfn(d->grant_table, gref, &sgfn,
@@ -1470,7 +1510,7 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
 
             if ( XENMEM_SHARING_OP_FIELD_IS_GREF(mso.u.share.client_gfn) )
             {
-                grant_ref_t gref = (grant_ref_t) 
+                grant_ref_t gref = (grant_ref_t)
                                     (XENMEM_SHARING_OP_FIELD_GET_GREF(
                                         mso.u.share.client_gfn));
                 rc = mem_sharing_gref_to_gfn(cd->grant_table, gref, &cgfn,
@@ -1534,7 +1574,7 @@ int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg)
             sh      = mso.u.share.source_handle;
             cgfn    = mso.u.share.client_gfn;
 
-            rc = mem_sharing_add_to_physmap(d, sgfn, sh, cd, cgfn); 
+            rc = mem_sharing_add_to_physmap(d, sgfn, sh, cd, cgfn);
 
             rcu_unlock_domain(cd);
         }
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index ba126f790a..3119269073 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -511,8 +511,10 @@ mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn_l,
     if ( (q & P2M_UNSHARE) && p2m_is_shared(*t) )
     {
         ASSERT(p2m_is_hostp2m(p2m));
-        /* Try to unshare. If we fail, communicate ENOMEM without
-         * sleeping. */
+        /*
+         * Try to unshare. If we fail, communicate ENOMEM without
+         * sleeping.
+         */
         if ( mem_sharing_unshare_page(p2m->domain, gfn_l, 0) < 0 )
             mem_sharing_notify_enomem(p2m->domain, gfn_l, false);
         mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order, NULL);
@@ -892,15 +894,15 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
                               &a, 0, NULL, NULL);
         if ( p2m_is_shared(ot) )
         {
-            /* Do an unshare to cleanly take care of all corner 
-             * cases. */
+            /* Do an unshare to cleanly take care of all corner cases. */
             int rc;
             rc = mem_sharing_unshare_page(p2m->domain,
                                           gfn_x(gfn_add(gfn, i)), 0);
             if ( rc )
             {
                 p2m_unlock(p2m);
-                /* NOTE: Should a guest domain bring this upon itself,
+                /*
+                 * NOTE: Should a guest domain bring this upon itself,
                  * there is not a whole lot we can do. We are buried
                  * deep in locks from most code paths by now. So, fail
                  * the call and don't try to sleep on a wait queue
@@ -909,8 +911,9 @@ guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
                  * However, all current (changeset 3432abcf9380) code
                  * paths avoid this unsavoury situation. For now.
                  *
-                 * Foreign domains are okay to place an event as they 
-                 * won't go to sleep. */
+                 * Foreign domains are okay to place an event as they
+                 * won't go to sleep.
+                 */
                 (void)mem_sharing_notify_enomem(p2m->domain,
                                                 gfn_x(gfn_add(gfn, i)), false);
                 return rc;
diff --git a/xen/include/asm-x86/mem_sharing.h b/xen/include/asm-x86/mem_sharing.h
index db22468744..7d40e38563 100644
--- a/xen/include/asm-x86/mem_sharing.h
+++ b/xen/include/asm-x86/mem_sharing.h
@@ -33,12 +33,14 @@
 #define MEM_SHARING_AUDIT 0
 #endif
 
-typedef uint64_t shr_handle_t; 
+typedef uint64_t shr_handle_t;
 
 typedef struct rmap_hashtab {
     struct list_head *bucket;
-    /* Overlaps with prev pointer of list_head in union below.
-     * Unlike the prev pointer, this can be NULL. */
+    /*
+     * Overlaps with prev pointer of list_head in union below.
+     * Unlike the prev pointer, this can be NULL.
+     */
     void *flag;
 } rmap_hashtab_t;
 
@@ -57,34 +59,34 @@ struct page_sharing_info
     };
 };
 
-#define sharing_supported(_d) \
-    (is_hvm_domain(_d) && paging_mode_hap(_d)) 
-
 unsigned int mem_sharing_get_nr_saved_mfns(void);
 unsigned int mem_sharing_get_nr_shared_mfns(void);
 
 #define MEM_SHARING_DESTROY_GFN       (1<<1)
 /* Only fails with -ENOMEM. Enforce it with a BUG_ON wrapper. */
 int __mem_sharing_unshare_page(struct domain *d,
-                             unsigned long gfn, 
-                             uint16_t flags);
-static inline int mem_sharing_unshare_page(struct domain *d,
-                                           unsigned long gfn,
-                                           uint16_t flags)
+                               unsigned long gfn,
+                               uint16_t flags);
+
+static inline
+int mem_sharing_unshare_page(struct domain *d,
+                             unsigned long gfn,
+                             uint16_t flags)
 {
     int rc = __mem_sharing_unshare_page(d, gfn, flags);
-    BUG_ON( rc && (rc != -ENOMEM) );
+    BUG_ON(rc && (rc != -ENOMEM));
     return rc;
 }
 
-/* If called by a foreign domain, possible errors are
+/*
+ * If called by a foreign domain, possible errors are
  *   -EBUSY -> ring full
  *   -ENOSYS -> no ring to begin with
  * and the foreign mapper is responsible for retrying.
  *
- * If called by the guest vcpu itself and allow_sleep is set, may 
- * sleep on a wait queue, so the caller is responsible for not 
- * holding locks on entry. It may only fail with ENOSYS 
+ * If called by the guest vcpu itself and allow_sleep is set, may
+ * sleep on a wait queue, so the caller is responsible for not
+ * holding locks on entry. It may only fail with ENOSYS.
  *
  * If called by the guest vcpu itself and allow_sleep is not set,
  * then it's the same as a foreign domain.
@@ -92,10 +94,11 @@ static inline int mem_sharing_unshare_page(struct domain *d,
 int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
                               bool allow_sleep);
 int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg);
-int mem_sharing_domctl(struct domain *d, 
+int mem_sharing_domctl(struct domain *d,
                        struct xen_domctl_mem_sharing_op *mec);
 
-/* Scans the p2m and relinquishes any shared pages, destroying 
+/*
+ * Scans the p2m and relinquishes any shared pages, destroying
  * those for which this domain holds the final reference.
  * Preemptible.
  */
@@ -107,18 +110,22 @@ static inline unsigned int mem_sharing_get_nr_saved_mfns(void)
 {
     return 0;
 }
+
 static inline unsigned int mem_sharing_get_nr_shared_mfns(void)
 {
     return 0;
 }
-static inline int mem_sharing_unshare_page(struct domain *d,
-                                           unsigned long gfn,
-                                           uint16_t flags)
+
+static inline
+int mem_sharing_unshare_page(struct domain *d, unsigned long gfn,
+                             uint16_t flags)
 {
     ASSERT_UNREACHABLE();
     return -EOPNOTSUPP;
 }
-static inline int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
+
+static inline
+int mem_sharing_notify_enomem(struct domain *d, unsigned long gfn,
                               bool allow_sleep)
 {
     ASSERT_UNREACHABLE();
-- 
2.20.1



Thread overview: 96+ messages
2019-12-18 19:40 [Xen-devel] [PATCH v2 00/20] VM forking Tamas K Lengyel
2019-12-18 19:40 ` [Xen-devel] [PATCH v2 01/20] x86: make hvm_{get/set}_param accessible Tamas K Lengyel
2019-12-19 19:07   ` Andrew Cooper
2019-12-19 19:38     ` Tamas K Lengyel
2019-12-19 19:40       ` Andrew Cooper
2019-12-19 19:49         ` Tamas K Lengyel
2019-12-19 19:57           ` Andrew Cooper
2019-12-19 20:09             ` Tamas K Lengyel
2019-12-20 16:46   ` Jan Beulich
2019-12-20 17:27     ` Tamas K Lengyel
2019-12-20 17:32       ` Andrew Cooper
2019-12-20 17:36         ` Tamas K Lengyel
2019-12-20 17:46           ` Andrew Cooper
2019-12-20 17:50             ` Tamas K Lengyel
2019-12-20 18:00               ` Andrew Cooper
2019-12-20 18:05                 ` Tamas K Lengyel
2019-12-23  9:37         ` Jan Beulich
2019-12-23 14:55           ` Tamas K Lengyel
2019-12-27  8:02             ` Jan Beulich
2019-12-27 13:10               ` Tamas K Lengyel
2019-12-27 13:44                 ` Jan Beulich
2019-12-27 14:06                   ` Tamas K Lengyel
2019-12-18 19:40 ` [Xen-devel] [PATCH v2 02/20] xen/x86: Make hap_get_allocation accessible Tamas K Lengyel
2019-12-19 19:08   ` Andrew Cooper
2019-12-20 16:48   ` Jan Beulich
2019-12-18 19:40 ` [Xen-devel] [PATCH v2 03/20] tools/libxc: clean up memory sharing files Tamas K Lengyel
2019-12-18 19:40 ` Tamas K Lengyel [this message]
2019-12-19 11:18   ` [Xen-devel] [PATCH v2 04/20] x86/mem_sharing: cleanup code and comments in various locations Andrew Cooper
2019-12-19 16:20     ` Tamas K Lengyel
2019-12-19 16:21     ` Tamas K Lengyel
2019-12-19 18:51       ` Andrew Cooper
2019-12-19 19:26         ` Tamas K Lengyel
2019-12-18 19:40 ` [Xen-devel] [PATCH v2 05/20] x86/mem_sharing: make get_two_gfns take locks conditionally Tamas K Lengyel
2019-12-19 19:12   ` Andrew Cooper
2019-12-18 19:40 ` [Xen-devel] [PATCH v2 06/20] x86/mem_sharing: drop flags from mem_sharing_unshare_page Tamas K Lengyel
2019-12-18 19:40 ` [Xen-devel] [PATCH v2 07/20] x86/mem_sharing: don't try to unshare twice during page fault Tamas K Lengyel
2019-12-19 19:19   ` Andrew Cooper
2019-12-18 19:40 ` [Xen-devel] [PATCH v2 08/20] x86/mem_sharing: define mem_sharing_domain to hold some scattered variables Tamas K Lengyel
2019-12-18 19:40 ` [Xen-devel] [PATCH v2 09/20] x86/mem_sharing: Use INVALID_MFN and p2m_is_shared in relinquish_shared_pages Tamas K Lengyel
2019-12-18 19:40 ` [Xen-devel] [PATCH v2 10/20] x86/mem_sharing: Make add_to_physmap static and shorten name Tamas K Lengyel
2019-12-18 19:40 ` [Xen-devel] [PATCH v2 11/20] x86/mem_sharing: Convert MEM_SHARING_DESTROY_GFN to a bool Tamas K Lengyel
2019-12-18 21:29   ` Julien Grall
2019-12-18 22:19     ` Tamas K Lengyel
2019-12-18 19:40 ` [Xen-devel] [PATCH v2 12/20] x86/mem_sharing: Replace MEM_SHARING_DEBUG with gdprintk Tamas K Lengyel
2019-12-18 19:40 ` [Xen-devel] [PATCH v2 13/20] x86/mem_sharing: ASSERT that p2m_set_entry succeeds Tamas K Lengyel
2019-12-18 19:40 ` [Xen-devel] [PATCH v2 14/20] x86/mem_sharing: Enable mem_sharing on first memop Tamas K Lengyel
2019-12-18 19:40 ` [Xen-devel] [PATCH v2 15/20] x86/mem_sharing: Skip xen heap pages in memshr nominate Tamas K Lengyel
2019-12-18 19:40 ` [Xen-devel] [PATCH v2 16/20] x86/mem_sharing: check page type count earlier Tamas K Lengyel
2019-12-18 19:40 ` [Xen-devel] [PATCH v2 17/20] xen/mem_sharing: VM forking Tamas K Lengyel
2019-12-18 19:40 ` [Xen-devel] [PATCH v2 18/20] xen/mem_access: Use __get_gfn_type_access in set_mem_access Tamas K Lengyel
2019-12-19  7:59   ` Alexandru Stefan ISAILA
2019-12-19 16:00     ` Tamas K Lengyel
2019-12-18 19:40 ` [Xen-devel] [PATCH v2 19/20] x86/mem_sharing: reset a fork Tamas K Lengyel
2019-12-18 22:00   ` Julien Grall
2019-12-18 22:33     ` Tamas K Lengyel
2019-12-18 23:01       ` Julien Grall
2019-12-19  0:15         ` Tamas K Lengyel
2019-12-19  7:45           ` Julien Grall
2019-12-19 16:11             ` Tamas K Lengyel
2019-12-19 16:57               ` Julien Grall
2019-12-19 17:23                 ` Tamas K Lengyel
2019-12-19 17:38                   ` Julien Grall
2019-12-19 18:00                     ` Tamas K Lengyel
2019-12-19 11:06           ` Jan Beulich
2019-12-19 16:02             ` Tamas K Lengyel
2019-12-18 19:40 ` [Xen-devel] [PATCH v2 20/20] xen/tools: VM forking toolstack side Tamas K Lengyel
2019-12-19  9:48 ` [Xen-devel] [PATCH v2 00/20] VM forking Roger Pau Monné
2019-12-19 15:58   ` Tamas K Lengyel
2019-12-30 17:59     ` Roger Pau Monné
2019-12-30 18:15       ` Tamas K Lengyel
2019-12-30 18:43         ` Julien Grall
2019-12-30 20:46           ` Tamas K Lengyel
2019-12-31  0:20             ` Julien Grall
2019-12-31  0:37               ` Tamas K Lengyel
2019-12-31 10:40                 ` Roger Pau Monné
2019-12-31 15:00                   ` Tamas K Lengyel
2019-12-31 15:11                     ` Roger Pau Monné
2019-12-31 16:08                       ` Tamas K Lengyel
2019-12-31 16:36                         ` Tamas K Lengyel
2020-01-08  9:42                           ` Julien Grall
2020-01-08 15:08                           ` Roger Pau Monné
2020-01-08 15:32                             ` Tamas K Lengyel
2020-01-08 18:00                               ` Roger Pau Monné
2020-01-08 18:14                                 ` Tamas K Lengyel
2020-01-08 18:23                                   ` Tamas K Lengyel
2020-01-08 18:44                                     ` Roger Pau Monné
2020-01-08 19:47                                       ` Tamas K Lengyel
2020-01-08 18:36                                   ` Roger Pau Monné
2020-01-08 19:51                                     ` Tamas K Lengyel
2020-01-09  9:47                                       ` Roger Pau Monné
2020-01-09 13:31                                         ` Tamas K Lengyel
2020-01-08 16:34                       ` George Dunlap
2020-01-08 17:06                         ` Tamas K Lengyel
2020-01-08 17:16                           ` George Dunlap
2020-01-08 17:25                             ` Tamas K Lengyel
2020-01-08 18:07                         ` Roger Pau Monné
