* [Xen-devel] [PATCH 0/3] Post-299 cleanups
@ 2019-12-13 17:37 George Dunlap
  2019-12-13 17:37 ` [Xen-devel] [PATCH 1/3] x86/mm: Use a more descriptive name for pagetable mfns George Dunlap
                   ` (3 more replies)
  0 siblings, 4 replies; 11+ messages in thread
From: George Dunlap @ 2019-12-13 17:37 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, George Dunlap, Jan Beulich

This series implements a number of cleanups to make the code simpler
and easier to follow.  No functional changes intended.

George Dunlap (3):
  x86/mm: Use a more descriptive name for pagetable mfns
  x86/mm: Use mfn_t in type get / put call tree
  x86/mm: More descriptive names for page de/validation functions

 xen/arch/x86/domain.c    |   2 +-
 xen/arch/x86/mm.c        | 161 +++++++++++++++++++--------------------
 xen/include/asm-x86/mm.h |   4 +-
 3 files changed, 82 insertions(+), 85 deletions(-)

--
CC: Andrew Cooper <andrew.cooper3@citrix.com>
CC: Jan Beulich <jbeulich@suse.com>
2.24.0


* [Xen-devel] [PATCH 1/3] x86/mm: Use a more descriptive name for pagetable mfns
  2019-12-13 17:37 [Xen-devel] [PATCH 0/3] Post-299 cleanups George Dunlap
@ 2019-12-13 17:37 ` George Dunlap
  2019-12-16 11:07   ` Jan Beulich
  2019-12-13 17:37 ` [Xen-devel] [PATCH 2/3] x86/mm: Use mfn_t in type get / put call tree George Dunlap
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 11+ messages in thread
From: George Dunlap @ 2019-12-13 17:37 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, George Dunlap, Jan Beulich

In many places, a PTE being modified is accompanied by the pagetable
mfn which contains the PTE (primarily in order to be able to maintain
linear mapping counts).  In many cases, this mfn is stored in the
non-descript variable (or argument) "pfn".

Replace these names with lNmfn, to indicate that 1) this is a
pagetable mfn, and 2) that it is the same level as the PTE in
question.  This should be enough to remind readers that it's the mfn
containing the PTE.
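
For example (taking put_page_from_l3e() from the diff below), the change
is purely a rename of the parameter:

    /* Before: "pfn" gives no hint that this is the mfn of the L3 table. */
    static int put_page_from_l3e(l3_pgentry_t l3e, unsigned long pfn,
                                 unsigned int flags);

    /* After: "l3mfn" is the mfn of the L3 table containing the PTE. */
    static int put_page_from_l3e(l3_pgentry_t l3e, unsigned long l3mfn,
                                 unsigned int flags);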

No functional change.

Signed-off-by: George Dunlap <george.dunlap@citrix.com>
---
v2:
- Also rename arguments for put_page_from_lNe

CC: Andrew Cooper <andrew.cooper3@citrix.com>
CC: Jan Beulich <jbeulich@suse.com>
---
 xen/arch/x86/mm.c | 68 +++++++++++++++++++++++------------------------
 1 file changed, 34 insertions(+), 34 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 9556e8f780..ceb656ca75 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1141,7 +1141,7 @@ static int get_page_and_type_from_mfn(
 define_get_linear_pagetable(l2);
 static int
 get_page_from_l2e(
-    l2_pgentry_t l2e, unsigned long pfn, struct domain *d, unsigned int flags)
+    l2_pgentry_t l2e, unsigned long l2mfn, struct domain *d, unsigned int flags)
 {
     unsigned long mfn = l2e_get_pfn(l2e);
     int rc;
@@ -1156,7 +1156,7 @@ get_page_from_l2e(
     ASSERT(!(flags & PTF_preemptible));
 
     rc = get_page_and_type_from_mfn(_mfn(mfn), PGT_l1_page_table, d, flags);
-    if ( unlikely(rc == -EINVAL) && get_l2_linear_pagetable(l2e, pfn, d) )
+    if ( unlikely(rc == -EINVAL) && get_l2_linear_pagetable(l2e, l2mfn, d) )
         rc = 0;
 
     return rc;
@@ -1165,7 +1165,7 @@ get_page_from_l2e(
 define_get_linear_pagetable(l3);
 static int
 get_page_from_l3e(
-    l3_pgentry_t l3e, unsigned long pfn, struct domain *d, unsigned int flags)
+    l3_pgentry_t l3e, unsigned long l3mfn, struct domain *d, unsigned int flags)
 {
     int rc;
 
@@ -1180,7 +1180,7 @@ get_page_from_l3e(
         l3e_get_mfn(l3e), PGT_l2_page_table, d, flags | PTF_preemptible);
     if ( unlikely(rc == -EINVAL) &&
          !is_pv_32bit_domain(d) &&
-         get_l3_linear_pagetable(l3e, pfn, d) )
+         get_l3_linear_pagetable(l3e, l3mfn, d) )
         rc = 0;
 
     return rc;
@@ -1189,7 +1189,7 @@ get_page_from_l3e(
 define_get_linear_pagetable(l4);
 static int
 get_page_from_l4e(
-    l4_pgentry_t l4e, unsigned long pfn, struct domain *d, unsigned int flags)
+    l4_pgentry_t l4e, unsigned long l4mfn, struct domain *d, unsigned int flags)
 {
     int rc;
 
@@ -1202,7 +1202,7 @@ get_page_from_l4e(
 
     rc = get_page_and_type_from_mfn(
         l4e_get_mfn(l4e), PGT_l3_page_table, d, flags | PTF_preemptible);
-    if ( unlikely(rc == -EINVAL) && get_l4_linear_pagetable(l4e, pfn, d) )
+    if ( unlikely(rc == -EINVAL) && get_l4_linear_pagetable(l4e, l4mfn, d) )
         rc = 0;
 
     return rc;
@@ -1329,10 +1329,10 @@ static int put_data_pages(struct page_info *page, bool writeable, int pt_shift)
  * NB. Virtual address 'l2e' maps to a machine address within frame 'pfn'.
  * Note also that this automatically deals correctly with linear p.t.'s.
  */
-static int put_page_from_l2e(l2_pgentry_t l2e, unsigned long pfn,
+static int put_page_from_l2e(l2_pgentry_t l2e, unsigned long l2mfn,
                              unsigned int flags)
 {
-    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || (l2e_get_pfn(l2e) == pfn) )
+    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || (l2e_get_pfn(l2e) == l2mfn) )
         return 1;
 
     if ( l2e_get_flags(l2e) & _PAGE_PSE )
@@ -1340,13 +1340,13 @@ static int put_page_from_l2e(l2_pgentry_t l2e, unsigned long pfn,
                               l2e_get_flags(l2e) & _PAGE_RW,
                               L2_PAGETABLE_SHIFT);
 
-    return put_pt_page(l2e_get_page(l2e), mfn_to_page(_mfn(pfn)), flags);
+    return put_pt_page(l2e_get_page(l2e), mfn_to_page(_mfn(l2mfn)), flags);
 }
 
-static int put_page_from_l3e(l3_pgentry_t l3e, unsigned long pfn,
+static int put_page_from_l3e(l3_pgentry_t l3e, unsigned long l3mfn,
                              unsigned int flags)
 {
-    if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || (l3e_get_pfn(l3e) == pfn) )
+    if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || (l3e_get_pfn(l3e) == l3mfn) )
         return 1;
 
     if ( unlikely(l3e_get_flags(l3e) & _PAGE_PSE) )
@@ -1354,16 +1354,16 @@ static int put_page_from_l3e(l3_pgentry_t l3e, unsigned long pfn,
                               l3e_get_flags(l3e) & _PAGE_RW,
                               L3_PAGETABLE_SHIFT);
 
-    return put_pt_page(l3e_get_page(l3e), mfn_to_page(_mfn(pfn)), flags);
+    return put_pt_page(l3e_get_page(l3e), mfn_to_page(_mfn(l3mfn)), flags);
 }
 
-static int put_page_from_l4e(l4_pgentry_t l4e, unsigned long pfn,
+static int put_page_from_l4e(l4_pgentry_t l4e, unsigned long l4mfn,
                              unsigned int flags)
 {
-    if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) || (l4e_get_pfn(l4e) == pfn) )
+    if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) || (l4e_get_pfn(l4e) == l4mfn) )
         return 1;
 
-    return put_pt_page(l4e_get_page(l4e), mfn_to_page(_mfn(pfn)), flags);
+    return put_pt_page(l4e_get_page(l4e), mfn_to_page(_mfn(l4mfn)), flags);
 }
 
 static int alloc_l1_table(struct page_info *page)
@@ -1460,13 +1460,13 @@ static int create_pae_xen_mappings(struct domain *d, l3_pgentry_t *pl3e)
 static int alloc_l2_table(struct page_info *page, unsigned long type)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long  pfn = mfn_x(page_to_mfn(page));
+    unsigned long  l2mfn = mfn_x(page_to_mfn(page));
     l2_pgentry_t  *pl2e;
     unsigned int   i;
     int            rc = 0;
     unsigned int   partial_flags = page->partial_flags;
 
-    pl2e = map_domain_page(_mfn(pfn));
+    pl2e = map_domain_page(_mfn(l2mfn));
 
     /*
      * NB that alloc_l2_table will never set partial_pte on an l2; but
@@ -1492,7 +1492,7 @@ static int alloc_l2_table(struct page_info *page, unsigned long type)
             rc = -EINTR;
         }
         else
-            rc = get_page_from_l2e(l2e, pfn, d, partial_flags);
+            rc = get_page_from_l2e(l2e, l2mfn, d, partial_flags);
 
         /*
          * It shouldn't be possible for get_page_from_l2e to return
@@ -1559,14 +1559,14 @@ static int alloc_l2_table(struct page_info *page, unsigned long type)
 static int alloc_l3_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long  pfn = mfn_x(page_to_mfn(page));
+    unsigned long  l3mfn = mfn_x(page_to_mfn(page));
     l3_pgentry_t  *pl3e;
     unsigned int   i;
     int            rc = 0;
     unsigned int   partial_flags = page->partial_flags;
     l3_pgentry_t   l3e = l3e_empty();
 
-    pl3e = map_domain_page(_mfn(pfn));
+    pl3e = map_domain_page(_mfn(l3mfn));
 
     /*
      * PAE guests allocate full pages, but aren't required to initialize
@@ -1603,7 +1603,7 @@ static int alloc_l3_table(struct page_info *page)
             rc = -EINTR;
         }
         else
-            rc = get_page_from_l3e(l3e, pfn, d,
+            rc = get_page_from_l3e(l3e, l3mfn, d,
                                    partial_flags | PTF_retain_ref_on_restart);
 
         if ( rc == -ERESTART )
@@ -1786,8 +1786,8 @@ void zap_ro_mpt(mfn_t mfn)
 static int alloc_l4_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long  pfn = mfn_x(page_to_mfn(page));
-    l4_pgentry_t  *pl4e = map_domain_page(_mfn(pfn));
+    unsigned long  l4mfn = mfn_x(page_to_mfn(page));
+    l4_pgentry_t  *pl4e = map_domain_page(_mfn(l4mfn));
     unsigned int   i;
     int            rc = 0;
     unsigned int   partial_flags = page->partial_flags;
@@ -1809,7 +1809,7 @@ static int alloc_l4_table(struct page_info *page)
             rc = -EINTR;
         }
         else
-            rc = get_page_from_l4e(l4e, pfn, d,
+            rc = get_page_from_l4e(l4e, l4mfn, d,
                                    partial_flags | PTF_retain_ref_on_restart);
 
         if ( rc == -ERESTART )
@@ -1869,7 +1869,7 @@ static int alloc_l4_table(struct page_info *page)
 
     if ( !rc )
     {
-        init_xen_l4_slots(pl4e, _mfn(pfn),
+        init_xen_l4_slots(pl4e, _mfn(l4mfn),
                           d, INVALID_MFN, VM_ASSIST(d, m2p_strict));
         atomic_inc(&d->arch.pv.nr_l4_pages);
     }
@@ -1896,18 +1896,18 @@ static void free_l1_table(struct page_info *page)
 static int free_l2_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long pfn = mfn_x(page_to_mfn(page));
+    unsigned long l2mfn = mfn_x(page_to_mfn(page));
     l2_pgentry_t *pl2e;
     int rc = 0;
     unsigned int partial_flags = page->partial_flags,
         i = page->nr_validated_ptes - !(partial_flags & PTF_partial_set);
 
-    pl2e = map_domain_page(_mfn(pfn));
+    pl2e = map_domain_page(_mfn(l2mfn));
 
     for ( ; ; )
     {
         if ( is_guest_l2_slot(d, page->u.inuse.type_info, i) )
-            rc = put_page_from_l2e(pl2e[i], pfn, partial_flags);
+            rc = put_page_from_l2e(pl2e[i], l2mfn, partial_flags);
         if ( rc < 0 )
             break;
 
@@ -1948,17 +1948,17 @@ static int free_l2_table(struct page_info *page)
 static int free_l3_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long pfn = mfn_x(page_to_mfn(page));
+    unsigned long l3mfn = mfn_x(page_to_mfn(page));
     l3_pgentry_t *pl3e;
     int rc = 0;
     unsigned int partial_flags = page->partial_flags,
         i = page->nr_validated_ptes - !(partial_flags & PTF_partial_set);
 
-    pl3e = map_domain_page(_mfn(pfn));
+    pl3e = map_domain_page(_mfn(l3mfn));
 
     for ( ; ; )
     {
-        rc = put_page_from_l3e(pl3e[i], pfn, partial_flags);
+        rc = put_page_from_l3e(pl3e[i], l3mfn, partial_flags);
         if ( rc < 0 )
             break;
 
@@ -1995,15 +1995,15 @@ static int free_l3_table(struct page_info *page)
 static int free_l4_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long pfn = mfn_x(page_to_mfn(page));
-    l4_pgentry_t *pl4e = map_domain_page(_mfn(pfn));
+    unsigned long l4mfn = mfn_x(page_to_mfn(page));
+    l4_pgentry_t *pl4e = map_domain_page(_mfn(l4mfn));
     int rc = 0;
     unsigned partial_flags = page->partial_flags,
         i = page->nr_validated_ptes - !(partial_flags & PTF_partial_set);
 
     do {
         if ( is_guest_l4_slot(d, i) )
-            rc = put_page_from_l4e(pl4e[i], pfn, partial_flags);
+            rc = put_page_from_l4e(pl4e[i], l4mfn, partial_flags);
         if ( rc < 0 )
             break;
         partial_flags = 0;
-- 
2.24.0



* [Xen-devel] [PATCH 2/3] x86/mm: Use mfn_t in type get / put call tree
  2019-12-13 17:37 [Xen-devel] [PATCH 0/3] Post-299 cleanups George Dunlap
  2019-12-13 17:37 ` [Xen-devel] [PATCH 1/3] x86/mm: Use a more descriptive name for pagetable mfns George Dunlap
@ 2019-12-13 17:37 ` George Dunlap
  2019-12-16 11:10   ` Jan Beulich
  2019-12-13 17:37 ` [Xen-devel] [PATCH 3/3] x86/mm: More descriptive names for page de/validation functions George Dunlap
  2019-12-16 13:24 ` [Xen-devel] [PATCH 0/3] Post-299 cleanups Andrew Cooper
  3 siblings, 1 reply; 11+ messages in thread
From: George Dunlap @ 2019-12-13 17:37 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, George Dunlap, Jan Beulich

Replace `unsigned long` with `mfn_t` as appropriate throughout
alloc/free_lN_table, get/put_page_from_lNe, and
get_lN_linear_pagetable.  This obviates the need for a load of
`mfn_x()` and `_mfn()` casts.
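
For readers unfamiliar with the typesafe wrappers, mfn_t behaves roughly
like the sketch below (a simplified illustration only; the real definition
is generated by Xen's typesafe macros):

    /* Illustrative sketch of a typesafe mfn wrapper; not the real code. */
    typedef struct { unsigned long mfn; } mfn_t;

    static inline mfn_t _mfn(unsigned long m) { return (mfn_t){ .mfn = m }; }
    static inline unsigned long mfn_x(mfn_t m) { return m.mfn; }
    static inline bool mfn_eq(mfn_t a, mfn_t b) { return a.mfn == b.mfn; }

Passing mfn_t through the call tree lets the compiler catch accidental
mixing of mfns with other integers, and removes the conversion noise at
each call site.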

Signed-off-by: George Dunlap <george.dunlap@citrix.com>
---
New in v2.

CC: Andrew Cooper <andrew.cooper3@citrix.com>
CC: Jan Beulich <jbeulich@suse.com>
---
 xen/arch/x86/mm.c | 77 +++++++++++++++++++++++------------------------
 1 file changed, 37 insertions(+), 40 deletions(-)

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index ceb656ca75..55f9a581b4 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -681,10 +681,10 @@ boolean_param("pv-linear-pt", opt_pv_linear_pt);
 #define define_get_linear_pagetable(level)                                  \
 static int                                                                  \
 get_##level##_linear_pagetable(                                             \
-    level##_pgentry_t pde, unsigned long pde_pfn, struct domain *d)         \
+    level##_pgentry_t pde, mfn_t pde_pfn, struct domain *d)                 \
 {                                                                           \
     unsigned long x, y;                                                     \
-    unsigned long pfn;                                                      \
+    mfn_t pfn;                                                              \
                                                                             \
     if ( !opt_pv_linear_pt )                                                \
     {                                                                       \
@@ -700,16 +700,16 @@ get_##level##_linear_pagetable(                                             \
         return 0;                                                           \
     }                                                                       \
                                                                             \
-    if ( (pfn = level##e_get_pfn(pde)) != pde_pfn )                         \
+    if ( !mfn_eq(pfn = level##e_get_mfn(pde), pde_pfn) )                    \
     {                                                                       \
-        struct page_info *page, *ptpg = mfn_to_page(_mfn(pde_pfn));         \
+        struct page_info *page, *ptpg = mfn_to_page(pde_pfn);               \
                                                                             \
         /* Make sure the page table belongs to the correct domain. */       \
         if ( unlikely(page_get_owner(ptpg) != d) )                          \
             return 0;                                                       \
                                                                             \
         /* Make sure the mapped frame belongs to the correct domain. */     \
-        page = get_page_from_mfn(_mfn(pfn), d);                             \
+        page = get_page_from_mfn(pfn, d);                                   \
         if ( unlikely(!page) )                                              \
             return 0;                                                       \
                                                                             \
@@ -755,7 +755,7 @@ get_##level##_linear_pagetable(                                             \
 #define define_get_linear_pagetable(level)                              \
 static int                                                              \
 get_##level##_linear_pagetable(                                         \
-        level##_pgentry_t pde, unsigned long pde_pfn, struct domain *d) \
+        level##_pgentry_t pde, mfn_t pde_pfn, struct domain *d)         \
 {                                                                       \
         return 0;                                                       \
 }
@@ -1141,7 +1141,7 @@ static int get_page_and_type_from_mfn(
 define_get_linear_pagetable(l2);
 static int
 get_page_from_l2e(
-    l2_pgentry_t l2e, unsigned long l2mfn, struct domain *d, unsigned int flags)
+    l2_pgentry_t l2e, mfn_t l2mfn, struct domain *d, unsigned int flags)
 {
     unsigned long mfn = l2e_get_pfn(l2e);
     int rc;
@@ -1165,7 +1165,7 @@ get_page_from_l2e(
 define_get_linear_pagetable(l3);
 static int
 get_page_from_l3e(
-    l3_pgentry_t l3e, unsigned long l3mfn, struct domain *d, unsigned int flags)
+    l3_pgentry_t l3e, mfn_t l3mfn, struct domain *d, unsigned int flags)
 {
     int rc;
 
@@ -1189,7 +1189,7 @@ get_page_from_l3e(
 define_get_linear_pagetable(l4);
 static int
 get_page_from_l4e(
-    l4_pgentry_t l4e, unsigned long l4mfn, struct domain *d, unsigned int flags)
+    l4_pgentry_t l4e, mfn_t l4mfn, struct domain *d, unsigned int flags)
 {
     int rc;
 
@@ -1329,10 +1329,9 @@ static int put_data_pages(struct page_info *page, bool writeable, int pt_shift)
  * NB. Virtual address 'l2e' maps to a machine address within frame 'pfn'.
  * Note also that this automatically deals correctly with linear p.t.'s.
  */
-static int put_page_from_l2e(l2_pgentry_t l2e, unsigned long l2mfn,
-                             unsigned int flags)
+static int put_page_from_l2e(l2_pgentry_t l2e, mfn_t l2mfn, unsigned int flags)
 {
-    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || (l2e_get_pfn(l2e) == l2mfn) )
+    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || mfn_eq(l2e_get_mfn(l2e), l2mfn) )
         return 1;
 
     if ( l2e_get_flags(l2e) & _PAGE_PSE )
@@ -1340,13 +1339,12 @@ static int put_page_from_l2e(l2_pgentry_t l2e, unsigned long l2mfn,
                               l2e_get_flags(l2e) & _PAGE_RW,
                               L2_PAGETABLE_SHIFT);
 
-    return put_pt_page(l2e_get_page(l2e), mfn_to_page(_mfn(l2mfn)), flags);
+    return put_pt_page(l2e_get_page(l2e), mfn_to_page(l2mfn), flags);
 }
 
-static int put_page_from_l3e(l3_pgentry_t l3e, unsigned long l3mfn,
-                             unsigned int flags)
+static int put_page_from_l3e(l3_pgentry_t l3e, mfn_t l3mfn, unsigned int flags)
 {
-    if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || (l3e_get_pfn(l3e) == l3mfn) )
+    if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || mfn_eq(l3e_get_mfn(l3e), l3mfn) )
         return 1;
 
     if ( unlikely(l3e_get_flags(l3e) & _PAGE_PSE) )
@@ -1354,16 +1352,15 @@ static int put_page_from_l3e(l3_pgentry_t l3e, unsigned long l3mfn,
                               l3e_get_flags(l3e) & _PAGE_RW,
                               L3_PAGETABLE_SHIFT);
 
-    return put_pt_page(l3e_get_page(l3e), mfn_to_page(_mfn(l3mfn)), flags);
+    return put_pt_page(l3e_get_page(l3e), mfn_to_page(l3mfn), flags);
 }
 
-static int put_page_from_l4e(l4_pgentry_t l4e, unsigned long l4mfn,
-                             unsigned int flags)
+static int put_page_from_l4e(l4_pgentry_t l4e, mfn_t l4mfn, unsigned int flags)
 {
-    if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) || (l4e_get_pfn(l4e) == l4mfn) )
+    if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) || mfn_eq(l4e_get_mfn(l4e), l4mfn) )
         return 1;
 
-    return put_pt_page(l4e_get_page(l4e), mfn_to_page(_mfn(l4mfn)), flags);
+    return put_pt_page(l4e_get_page(l4e), mfn_to_page(l4mfn), flags);
 }
 
 static int alloc_l1_table(struct page_info *page)
@@ -1460,13 +1457,13 @@ static int create_pae_xen_mappings(struct domain *d, l3_pgentry_t *pl3e)
 static int alloc_l2_table(struct page_info *page, unsigned long type)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long  l2mfn = mfn_x(page_to_mfn(page));
+    mfn_t         l2mfn = page_to_mfn(page);
     l2_pgentry_t  *pl2e;
     unsigned int   i;
     int            rc = 0;
     unsigned int   partial_flags = page->partial_flags;
 
-    pl2e = map_domain_page(_mfn(l2mfn));
+    pl2e = map_domain_page(l2mfn);
 
     /*
      * NB that alloc_l2_table will never set partial_pte on an l2; but
@@ -1559,14 +1556,14 @@ static int alloc_l2_table(struct page_info *page, unsigned long type)
 static int alloc_l3_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long  l3mfn = mfn_x(page_to_mfn(page));
+    mfn_t          l3mfn = page_to_mfn(page);
     l3_pgentry_t  *pl3e;
     unsigned int   i;
     int            rc = 0;
     unsigned int   partial_flags = page->partial_flags;
     l3_pgentry_t   l3e = l3e_empty();
 
-    pl3e = map_domain_page(_mfn(l3mfn));
+    pl3e = map_domain_page(l3mfn);
 
     /*
      * PAE guests allocate full pages, but aren't required to initialize
@@ -1786,8 +1783,8 @@ void zap_ro_mpt(mfn_t mfn)
 static int alloc_l4_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long  l4mfn = mfn_x(page_to_mfn(page));
-    l4_pgentry_t  *pl4e = map_domain_page(_mfn(l4mfn));
+    mfn_t          l4mfn = page_to_mfn(page);
+    l4_pgentry_t  *pl4e = map_domain_page(l4mfn);
     unsigned int   i;
     int            rc = 0;
     unsigned int   partial_flags = page->partial_flags;
@@ -1869,7 +1866,7 @@ static int alloc_l4_table(struct page_info *page)
 
     if ( !rc )
     {
-        init_xen_l4_slots(pl4e, _mfn(l4mfn),
+        init_xen_l4_slots(pl4e, l4mfn,
                           d, INVALID_MFN, VM_ASSIST(d, m2p_strict));
         atomic_inc(&d->arch.pv.nr_l4_pages);
     }
@@ -1896,13 +1893,13 @@ static void free_l1_table(struct page_info *page)
 static int free_l2_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long l2mfn = mfn_x(page_to_mfn(page));
+    mfn_t l2mfn = page_to_mfn(page);
     l2_pgentry_t *pl2e;
     int rc = 0;
     unsigned int partial_flags = page->partial_flags,
         i = page->nr_validated_ptes - !(partial_flags & PTF_partial_set);
 
-    pl2e = map_domain_page(_mfn(l2mfn));
+    pl2e = map_domain_page(l2mfn);
 
     for ( ; ; )
     {
@@ -1948,13 +1945,13 @@ static int free_l2_table(struct page_info *page)
 static int free_l3_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long l3mfn = mfn_x(page_to_mfn(page));
+    mfn_t l3mfn = page_to_mfn(page);
     l3_pgentry_t *pl3e;
     int rc = 0;
     unsigned int partial_flags = page->partial_flags,
         i = page->nr_validated_ptes - !(partial_flags & PTF_partial_set);
 
-    pl3e = map_domain_page(_mfn(l3mfn));
+    pl3e = map_domain_page(l3mfn);
 
     for ( ; ; )
     {
@@ -1995,8 +1992,8 @@ static int free_l3_table(struct page_info *page)
 static int free_l4_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
-    unsigned long l4mfn = mfn_x(page_to_mfn(page));
-    l4_pgentry_t *pl4e = map_domain_page(_mfn(l4mfn));
+    mfn_t l4mfn = page_to_mfn(page);
+    l4_pgentry_t *pl4e = map_domain_page(l4mfn);
     int rc = 0;
     unsigned partial_flags = page->partial_flags,
         i = page->nr_validated_ptes - !(partial_flags & PTF_partial_set);
@@ -2275,7 +2272,7 @@ static int mod_l2_entry(l2_pgentry_t *pl2e,
             return -EBUSY;
         }
 
-        if ( unlikely((rc = get_page_from_l2e(nl2e, mfn_x(mfn), d, 0)) < 0) )
+        if ( unlikely((rc = get_page_from_l2e(nl2e, mfn, d, 0)) < 0) )
             return rc;
 
         nl2e = adjust_guest_l2e(nl2e, d);
@@ -2294,7 +2291,7 @@ static int mod_l2_entry(l2_pgentry_t *pl2e,
         return -EBUSY;
     }
 
-    put_page_from_l2e(ol2e, mfn_x(mfn), PTF_defer);
+    put_page_from_l2e(ol2e, mfn, PTF_defer);
 
     return rc;
 }
@@ -2337,7 +2334,7 @@ static int mod_l3_entry(l3_pgentry_t *pl3e,
             return rc ? 0 : -EFAULT;
         }
 
-        rc = get_page_from_l3e(nl3e, mfn_x(mfn), d, 0);
+        rc = get_page_from_l3e(nl3e, mfn, d, 0);
         if ( unlikely(rc < 0) )
             return rc;
         rc = 0;
@@ -2362,7 +2359,7 @@ static int mod_l3_entry(l3_pgentry_t *pl3e,
         if ( !create_pae_xen_mappings(d, pl3e) )
             BUG();
 
-    put_page_from_l3e(ol3e, mfn_x(mfn), PTF_defer);
+    put_page_from_l3e(ol3e, mfn, PTF_defer);
     return rc;
 }
 
@@ -2404,7 +2401,7 @@ static int mod_l4_entry(l4_pgentry_t *pl4e,
             return rc ? 0 : -EFAULT;
         }
 
-        rc = get_page_from_l4e(nl4e, mfn_x(mfn), d, 0);
+        rc = get_page_from_l4e(nl4e, mfn, d, 0);
         if ( unlikely(rc < 0) )
             return rc;
         rc = 0;
@@ -2425,7 +2422,7 @@ static int mod_l4_entry(l4_pgentry_t *pl4e,
         return -EFAULT;
     }
 
-    put_page_from_l4e(ol4e, mfn_x(mfn), PTF_defer);
+    put_page_from_l4e(ol4e, mfn, PTF_defer);
     return rc;
 }
 #endif /* CONFIG_PV */
-- 
2.24.0



* [Xen-devel] [PATCH 3/3] x86/mm: More descriptive names for page de/validation functions
  2019-12-13 17:37 [Xen-devel] [PATCH 0/3] Post-299 cleanups George Dunlap
  2019-12-13 17:37 ` [Xen-devel] [PATCH 1/3] x86/mm: Use a more descriptive name for pagetable mfns George Dunlap
  2019-12-13 17:37 ` [Xen-devel] [PATCH 2/3] x86/mm: Use mfn_t in type get / put call tree George Dunlap
@ 2019-12-13 17:37 ` George Dunlap
  2019-12-16 11:15   ` Jan Beulich
  2019-12-16 13:24 ` [Xen-devel] [PATCH 0/3] Post-299 cleanups Andrew Cooper
  3 siblings, 1 reply; 11+ messages in thread
From: George Dunlap @ 2019-12-13 17:37 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, George Dunlap, Jan Beulich

The functions alloc_page_type(), alloc_lN_table(), free_page_type()
and free_lN_table() are confusingly named: nothing is being allocated
or freed.  Rather, the page being passed in is being either validated
or devalidated for use as the specific type; in the specific case of
pagetables, these may be promoted or demoted (i.e., grab appropriate
references for PTEs).

Rename alloc_page_type() and free_page_type() to validate_page() and
devalidate_page().  Also rename alloc_segdesc_page() to
validate_segdesc_page(), since this is what it's doing.

Rename alloc_lN_table() and free_lN_table() to promote_lN_table() and
demote_lN_table(), respectively.

After this change:
- get / put type consistently refer to increasing or decreasing the count
- validate / devalidate consistently refer to actions done when a
  type count goes 0 -> 1 or 1 -> 0
- promote / demote consistently refer to acquiring or freeing
  resources (in the form of type refs and general references) in order
  to allow a page to be used as a pagetable.
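
For illustration, the resulting call structure for (say) an L2 pagetable
looks roughly like this (simplified sketch; preemption and error handling
omitted):

    _get_page_type(page, PGT_l2_page_table, ...)
        -> validate_page(page, type, preemptible)    /* type count 0 -> 1 */
            -> promote_l2_table(page, type)          /* take refs for PTEs */

    _put_final_page_type(page, type, ...)
        -> devalidate_page(page, type, preemptible)  /* type count 1 -> 0 */
            -> demote_l2_table(page)                 /* drop refs for PTEs */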

No functional change.

Signed-off-by: George Dunlap <george.dunlap@citrix.com>
---
v2:
- Rebase onto staging
- Also rename alloc_segdesc_page
- Drop superfluous '_type' from validate_page / devalidate_page
- Use promote / demote for _lN_table

CC: Andrew Cooper <andrew.cooper3@citrix.com>
CC: Jan Beulich <jbeulich@suse.com>
---
 xen/arch/x86/domain.c    |  2 +-
 xen/arch/x86/mm.c        | 66 ++++++++++++++++++++--------------------
 xen/include/asm-x86/mm.h |  4 +--
 3 files changed, 36 insertions(+), 36 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index bed19fc4dc..7cb7fd31dd 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -2032,7 +2032,7 @@ static int relinquish_memory(
             if ( likely(y == x) )
             {
                 /* No need for atomic update of type_info here: noone else updates it. */
-                switch ( ret = free_page_type(page, x, 1) )
+                switch ( ret = devalidate_page(page, x, 1) )
                 {
                 case 0:
                     break;
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 55f9a581b4..a711e8c78d 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -587,7 +587,7 @@ const char __section(".bss.page_aligned.const") __aligned(PAGE_SIZE)
 
 
 #ifdef CONFIG_PV
-static int alloc_segdesc_page(struct page_info *page)
+static int validate_segdesc_page(struct page_info *page)
 {
     const struct domain *owner = page_get_owner(page);
     seg_desc_t *descs = __map_domain_page(page);
@@ -1363,7 +1363,7 @@ static int put_page_from_l4e(l4_pgentry_t l4e, mfn_t l4mfn, unsigned int flags)
     return put_pt_page(l4e_get_page(l4e), mfn_to_page(l4mfn), flags);
 }
 
-static int alloc_l1_table(struct page_info *page)
+static int promote_l1_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
     l1_pgentry_t  *pl1e;
@@ -1405,7 +1405,7 @@ static int alloc_l1_table(struct page_info *page)
 
  fail:
     gdprintk(XENLOG_WARNING,
-             "Failure %d in alloc_l1_table: slot %#x\n", ret, i);
+             "Failure %d in promote_l1_table: slot %#x\n", ret, i);
  out:
     while ( i-- > 0 )
         put_page_from_l1e(pl1e[i], d);
@@ -1438,7 +1438,7 @@ static int create_pae_xen_mappings(struct domain *d, l3_pgentry_t *pl3e)
      *  1. Cannot appear in slots != 3 because get_page_type() checks the
      *     PGT_pae_xen_l2 flag, which is asserted iff the L2 appears in slot 3
      *  2. Cannot appear in another page table's L3:
-     *     a. alloc_l3_table() calls this function and this check will fail
+     *     a. promote_l3_table() calls this function and this check will fail
      *     b. mod_l3_entry() disallows updates to slot 3 in an existing table
      */
     page = l3e_get_page(l3e3);
@@ -1454,7 +1454,7 @@ static int create_pae_xen_mappings(struct domain *d, l3_pgentry_t *pl3e)
     return 1;
 }
 
-static int alloc_l2_table(struct page_info *page, unsigned long type)
+static int promote_l2_table(struct page_info *page, unsigned long type)
 {
     struct domain *d = page_get_owner(page);
     mfn_t         l2mfn = page_to_mfn(page);
@@ -1466,8 +1466,8 @@ static int alloc_l2_table(struct page_info *page, unsigned long type)
     pl2e = map_domain_page(l2mfn);
 
     /*
-     * NB that alloc_l2_table will never set partial_pte on an l2; but
-     * free_l2_table might if a linear_pagetable entry is interrupted
+     * NB that promote_l2_table will never set partial_pte on an l2; but
+     * demote_l2_table might if a linear_pagetable entry is interrupted
      * partway through de-validation.  In that circumstance,
      * get_page_from_l2e() will always return -EINVAL; and we must
      * retain the type ref by doing the normal partial_flags tracking.
@@ -1494,7 +1494,7 @@ static int alloc_l2_table(struct page_info *page, unsigned long type)
         /*
          * It shouldn't be possible for get_page_from_l2e to return
          * -ERESTART, since we never call this with PTF_preemptible.
-         * (alloc_l1_table may return -EINTR on an L1TF-vulnerable
+         * (promote_l1_table may return -EINTR on an L1TF-vulnerable
          * entry.)
          *
          * NB that while on a "clean" promotion, we can never get
@@ -1515,12 +1515,12 @@ static int alloc_l2_table(struct page_info *page, unsigned long type)
         else if ( rc < 0 && rc != -EINTR )
         {
             gdprintk(XENLOG_WARNING,
-                     "Failure %d in alloc_l2_table: slot %#x\n", rc, i);
+                     "Failure %d in promote_l2_table: slot %#x\n", rc, i);
             ASSERT(current->arch.old_guest_table == NULL);
             if ( i )
             {
                 /*
-                 * alloc_l1_table() doesn't set old_guest_table; it does
+                 * promote_l1_table() doesn't set old_guest_table; it does
                  * its own tear-down immediately on failure.  If it
                  * did we'd need to check it and set partial_flags as we
                  * do in alloc_l[34]_table().
@@ -1553,7 +1553,7 @@ static int alloc_l2_table(struct page_info *page, unsigned long type)
     return rc;
 }
 
-static int alloc_l3_table(struct page_info *page)
+static int promote_l3_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
     mfn_t          l3mfn = page_to_mfn(page);
@@ -1626,7 +1626,7 @@ static int alloc_l3_table(struct page_info *page)
     if ( rc < 0 && rc != -ERESTART && rc != -EINTR )
     {
         gdprintk(XENLOG_WARNING,
-                 "Failure %d in alloc_l3_table: slot %#x\n", rc, i);
+                 "Failure %d in promote_l3_table: slot %#x\n", rc, i);
         if ( i )
         {
             page->nr_validated_ptes = i;
@@ -1677,7 +1677,7 @@ void init_xen_pae_l2_slots(l2_pgentry_t *l2t, const struct domain *d)
  * Fill an L4 with Xen entries.
  *
  * This function must write all ROOT_PAGETABLE_PV_XEN_SLOTS, to clobber any
- * values a guest may have left there from alloc_l4_table().
+ * values a guest may have left there from promote_l4_table().
  *
  * l4t and l4mfn are mandatory, but l4mfn doesn't need to be the mfn under
  * *l4t.  All other parameters are optional and will either fill or zero the
@@ -1780,7 +1780,7 @@ void zap_ro_mpt(mfn_t mfn)
 }
 
 #ifdef CONFIG_PV
-static int alloc_l4_table(struct page_info *page)
+static int promote_l4_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
     mfn_t          l4mfn = page_to_mfn(page);
@@ -1819,7 +1819,7 @@ static int alloc_l4_table(struct page_info *page)
         {
             if ( rc != -EINTR )
                 gdprintk(XENLOG_WARNING,
-                         "Failure %d in alloc_l4_table: slot %#x\n", rc, i);
+                         "Failure %d in promote_l4_table: slot %#x\n", rc, i);
             if ( i )
             {
                 page->nr_validated_ptes = i;
@@ -1875,7 +1875,7 @@ static int alloc_l4_table(struct page_info *page)
     return rc;
 }
 
-static void free_l1_table(struct page_info *page)
+static void demote_l1_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
     l1_pgentry_t *pl1e;
@@ -1890,7 +1890,7 @@ static void free_l1_table(struct page_info *page)
 }
 
 
-static int free_l2_table(struct page_info *page)
+static int demote_l2_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
     mfn_t l2mfn = page_to_mfn(page);
@@ -1942,7 +1942,7 @@ static int free_l2_table(struct page_info *page)
     return rc;
 }
 
-static int free_l3_table(struct page_info *page)
+static int demote_l3_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
     mfn_t l3mfn = page_to_mfn(page);
@@ -1989,7 +1989,7 @@ static int free_l3_table(struct page_info *page)
     return rc > 0 ? 0 : rc;
 }
 
-static int free_l4_table(struct page_info *page)
+static int demote_l4_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
     mfn_t l4mfn = page_to_mfn(page);
@@ -2589,7 +2589,7 @@ static void get_page_light(struct page_info *page)
     while ( unlikely(y != x) );
 }
 
-static int alloc_page_type(struct page_info *page, unsigned long type,
+static int validate_page(struct page_info *page, unsigned long type,
                            int preemptible)
 {
 #ifdef CONFIG_PV
@@ -2603,25 +2603,25 @@ static int alloc_page_type(struct page_info *page, unsigned long type,
     switch ( type & PGT_type_mask )
     {
     case PGT_l1_page_table:
-        rc = alloc_l1_table(page);
+        rc = promote_l1_table(page);
         break;
     case PGT_l2_page_table:
         ASSERT(preemptible);
-        rc = alloc_l2_table(page, type);
+        rc = promote_l2_table(page, type);
         break;
     case PGT_l3_page_table:
         ASSERT(preemptible);
-        rc = alloc_l3_table(page);
+        rc = promote_l3_table(page);
         break;
     case PGT_l4_page_table:
         ASSERT(preemptible);
-        rc = alloc_l4_table(page);
+        rc = promote_l4_table(page);
         break;
     case PGT_seg_desc_page:
-        rc = alloc_segdesc_page(page);
+        rc = validate_segdesc_page(page);
         break;
     default:
-        printk("Bad type in alloc_page_type %lx t=%" PRtype_info " c=%lx\n",
+        printk("Bad type in validate_page %lx t=%" PRtype_info " c=%lx\n",
                type, page->u.inuse.type_info,
                page->count_info);
         rc = -EINVAL;
@@ -2669,7 +2669,7 @@ static int alloc_page_type(struct page_info *page, unsigned long type,
 }
 
 
-int free_page_type(struct page_info *page, unsigned long type,
+int devalidate_page(struct page_info *page, unsigned long type,
                    int preemptible)
 {
 #ifdef CONFIG_PV
@@ -2697,20 +2697,20 @@ int free_page_type(struct page_info *page, unsigned long type,
     switch ( type & PGT_type_mask )
     {
     case PGT_l1_page_table:
-        free_l1_table(page);
+        demote_l1_table(page);
         rc = 0;
         break;
     case PGT_l2_page_table:
         ASSERT(preemptible);
-        rc = free_l2_table(page);
+        rc = demote_l2_table(page);
         break;
     case PGT_l3_page_table:
         ASSERT(preemptible);
-        rc = free_l3_table(page);
+        rc = demote_l3_table(page);
         break;
     case PGT_l4_page_table:
         ASSERT(preemptible);
-        rc = free_l4_table(page);
+        rc = demote_l4_table(page);
         break;
     default:
         gdprintk(XENLOG_WARNING, "type %" PRtype_info " mfn %" PRI_mfn "\n",
@@ -2730,7 +2730,7 @@ int free_page_type(struct page_info *page, unsigned long type,
 static int _put_final_page_type(struct page_info *page, unsigned long type,
                                 bool preemptible, struct page_info *ptpg)
 {
-    int rc = free_page_type(page, type, preemptible);
+    int rc = devalidate_page(page, type, preemptible);
 
     if ( ptpg && PGT_type_equal(type, ptpg->u.inuse.type_info) &&
          (type & PGT_validated) && rc != -EINTR )
@@ -3013,7 +3013,7 @@ static int _get_page_type(struct page_info *page, unsigned long type,
             page->partial_flags = 0;
             page->linear_pt_count = 0;
         }
-        rc = alloc_page_type(page, type, preemptible);
+        rc = validate_page(page, type, preemptible);
     }
 
  out:
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 320c6cd196..1479ba6703 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -355,8 +355,8 @@ static inline void *__page_to_virt(const struct page_info *pg)
                     (PAGE_SIZE / (sizeof(*pg) & -sizeof(*pg))));
 }
 
-int free_page_type(struct page_info *page, unsigned long type,
-                   int preemptible);
+int devalidate_page(struct page_info *page, unsigned long type,
+                         int preemptible);
 
 void init_xen_pae_l2_slots(l2_pgentry_t *l2t, const struct domain *d);
 void init_xen_l4_slots(l4_pgentry_t *l4t, mfn_t l4mfn,
-- 
2.24.0



* Re: [Xen-devel] [PATCH 1/3] x86/mm: Use a more descriptive name for pagetable mfns
  2019-12-13 17:37 ` [Xen-devel] [PATCH 1/3] x86/mm: Use a more descriptive name for pagetable mfns George Dunlap
@ 2019-12-16 11:07   ` Jan Beulich
  2019-12-16 11:10     ` George Dunlap
  0 siblings, 1 reply; 11+ messages in thread
From: Jan Beulich @ 2019-12-16 11:07 UTC (permalink / raw)
  To: George Dunlap; +Cc: xen-devel, Andrew Cooper

On 13.12.2019 18:37, George Dunlap wrote:
> In many places, a PTE being modified is accompanied by the pagetable
> mfn which contains the PTE (primarily in order to be able to maintain
> linear mapping counts).  In many cases, this mfn is stored in the
> non-descript variable (or argument) "pfn".
> 
> Replace these names with lNmfn, to indicate that 1) this is a
> pagetable mfn, and 2) that it is the same level as the PTE in
> question.  This should be enough to remind readers that it's the mfn
> containing the PTE.
> 
> No functional change.
> 
> Signed-off-by: George Dunlap <george.dunlap@citrix.com>

Acked-by: Jan Beulich <jbeulich@suse.com>

I take it you considered but dropped the idea of switching to mfn_t
at the same time, as suggested by Andrew on v1?

Jan


* Re: [Xen-devel] [PATCH 1/3] x86/mm: Use a more descriptive name for pagetable mfns
  2019-12-16 11:07   ` Jan Beulich
@ 2019-12-16 11:10     ` George Dunlap
  0 siblings, 0 replies; 11+ messages in thread
From: George Dunlap @ 2019-12-16 11:10 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Andrew Cooper

On 12/16/19 11:07 AM, Jan Beulich wrote:
> On 13.12.2019 18:37, George Dunlap wrote:
>> In many places, a PTE being modified is accompanied by the pagetable
>> mfn which contains the PTE (primarily in order to be able to maintain
>> linear mapping counts).  In many cases, this mfn is stored in the
>> non-descript variable (or argument) "pfn".
>>
>> Replace these names with lNmfn, to indicate that 1) this is a
>> pagetable mfn, and 2) that it is the same level as the PTE in
>> question.  This should be enough to remind readers that it's the mfn
>> containing the PTE.
>>
>> No functional change.
>>
>> Signed-off-by: George Dunlap <george.dunlap@citrix.com>
> 
> Acked-by: Jan Beulich <jbeulich@suse.com>
> 
> I take it you considered but dropped the idea of switching to mfn_t
> at the same time, as suggested by Andrew on v1?

Me: <waits for Jan to get to patch 2>

:-)

I thought it would be easier to review as two patches, each of which did
exactly one thing.  Since this patch was already complete, it was just
as easy to make a second patch as to make one more-complicated-to-review
patch.

Thanks,
 -George


* Re: [Xen-devel] [PATCH 2/3] x86/mm: Use mfn_t in type get / put call tree
  2019-12-13 17:37 ` [Xen-devel] [PATCH 2/3] x86/mm: Use mfn_t in type get / put call tree George Dunlap
@ 2019-12-16 11:10   ` Jan Beulich
  2019-12-16 11:13     ` George Dunlap
  0 siblings, 1 reply; 11+ messages in thread
From: Jan Beulich @ 2019-12-16 11:10 UTC (permalink / raw)
  To: George Dunlap; +Cc: xen-devel, Andrew Cooper

On 13.12.2019 18:37, George Dunlap wrote:
> Replace `unsigned long` with `mfn_t` as appropriate throughout
> alloc/free_lN_table, get/put_page_from_lNe, and
> get_lN_linear_pagetable.  This obviates the need for a load of
> `mfn_x()` and `_mfn()` casts.
> 
> Signed-off-by: George Dunlap <george.dunlap@citrix.com>

Ah, here we go. Sorry for not spotting before giving the remark
on patch 1.

> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -681,10 +681,10 @@ boolean_param("pv-linear-pt", opt_pv_linear_pt);
>  #define define_get_linear_pagetable(level)                                  \
>  static int                                                                  \
>  get_##level##_linear_pagetable(                                             \
> -    level##_pgentry_t pde, unsigned long pde_pfn, struct domain *d)         \
> +    level##_pgentry_t pde, mfn_t pde_pfn, struct domain *d)                 \

Perhaps better pde_mfn then here, ...

>  {                                                                           \
>      unsigned long x, y;                                                     \
> -    unsigned long pfn;                                                      \
> +    mfn_t pfn;                                                              \

... pfn here, and likewise elsewhere? If you agree,
Acked-by: Jan Beulich <jbeulich@suse.com>
with this renaming.

Jan


* Re: [Xen-devel] [PATCH 2/3] x86/mm: Use mfn_t in type get / put call tree
  2019-12-16 11:10   ` Jan Beulich
@ 2019-12-16 11:13     ` George Dunlap
  2019-12-16 11:25       ` Jan Beulich
  0 siblings, 1 reply; 11+ messages in thread
From: George Dunlap @ 2019-12-16 11:13 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Andrew Cooper

On 12/16/19 11:10 AM, Jan Beulich wrote:
> On 13.12.2019 18:37, George Dunlap wrote:
>> Replace `unsigned long` with `mfn_t` as appropriate throughout
>> alloc/free_lN_table, get/put_page_from_lNe, and
>> get_lN_linear_pagetable.  This obviates the need for a load of
>> `mfn_x()` and `_mfn()` casts.
>>
>> Signed-off-by: George Dunlap <george.dunlap@citrix.com>
> 
> Ah, here we go. Sorry for not spotting before giving the remark
> on patch 1.
> 
>> --- a/xen/arch/x86/mm.c
>> +++ b/xen/arch/x86/mm.c
>> @@ -681,10 +681,10 @@ boolean_param("pv-linear-pt", opt_pv_linear_pt);
>>  #define define_get_linear_pagetable(level)                                  \
>>  static int                                                                  \
>>  get_##level##_linear_pagetable(                                             \
>> -    level##_pgentry_t pde, unsigned long pde_pfn, struct domain *d)         \
>> +    level##_pgentry_t pde, mfn_t pde_pfn, struct domain *d)                 \
> 
> Perhaps better pde_mfn then here, ...
> 
>>  {                                                                           \
>>      unsigned long x, y;                                                     \
>> -    unsigned long pfn;                                                      \
>> +    mfn_t pfn;                                                              \
> 
> ... pfn here, and likewise elsewhere?

Sorry, I get that you mean s/pde_pfn/pde_mfn/g; for the argument to this
function; but what do you want done with the `pfn` local variable?  Did
you mean to suggest `mfn` here as well?

 -George


* Re: [Xen-devel] [PATCH 3/3] x86/mm: More descriptive names for page de/validation functions
  2019-12-13 17:37 ` [Xen-devel] [PATCH 3/3] x86/mm: More descriptive names for page de/validation functions George Dunlap
@ 2019-12-16 11:15   ` Jan Beulich
  0 siblings, 0 replies; 11+ messages in thread
From: Jan Beulich @ 2019-12-16 11:15 UTC (permalink / raw)
  To: George Dunlap; +Cc: xen-devel, Andrew Cooper

On 13.12.2019 18:37, George Dunlap wrote:
> The functions alloc_page_type(), alloc_lN_table(), free_page_type()
> and free_lN_table() are confusingly named: nothing is being allocated
> or freed.  Rather, the page being passed in is being either validated
> or devalidated for use as the specific type; in the specific case of
> pagetables, these may be promoted or demoted (i.e., grab appropriate
> references for PTEs).
> 
> Rename alloc_page_type() and free_page_type() to validate_page() and
> devalidate_page().  Also rename alloc_segdesc_page() to
> validate_segdesc_page(), since this is what it's doing.
> 
> Rename alloc_lN_table() and free_lN_table() to promote_lN_table() and
> demote_lN_table(), respectively.
> 
> After this change:
> - get / put type consistently refer to increasing or decreasing the count
> - validate / devalidate consistently refer to actions done when a
>   type count goes 0 -> 1 or 1 -> 0
> - promote / demote consistently refer to acquiring or freeing
>   resources (in the form of type refs and general references) in order
>   to allow a page to be used as a pagetable.
> 
> No functional change.
> 
> Signed-off-by: George Dunlap <george.dunlap@citrix.com>

Acked-by: Jan Beulich <jbeulich@suse.com>


* Re: [Xen-devel] [PATCH 2/3] x86/mm: Use mfn_t in type get / put call tree
  2019-12-16 11:13     ` George Dunlap
@ 2019-12-16 11:25       ` Jan Beulich
  0 siblings, 0 replies; 11+ messages in thread
From: Jan Beulich @ 2019-12-16 11:25 UTC (permalink / raw)
  To: George Dunlap; +Cc: xen-devel, Andrew Cooper

On 16.12.2019 12:13, George Dunlap wrote:
> On 12/16/19 11:10 AM, Jan Beulich wrote:
>> On 13.12.2019 18:37, George Dunlap wrote:
>>> Replace `unsigned long` with `mfn_t` as appropriate throughout
>>> alloc/free_lN_table, get/put_page_from_lNe, and
>>> get_lN_linear_pagetable.  This obviates the need for a load of
>>> `mfn_x()` and `_mfn()` casts.
>>>
>>> Signed-off-by: George Dunlap <george.dunlap@citrix.com>
>>
>> Ah, here we go. Sorry for not spotting before giving the remark
>> on patch 1.
>>
>>> --- a/xen/arch/x86/mm.c
>>> +++ b/xen/arch/x86/mm.c
>>> @@ -681,10 +681,10 @@ boolean_param("pv-linear-pt", opt_pv_linear_pt);
>>>  #define define_get_linear_pagetable(level)                                  \
>>>  static int                                                                  \
>>>  get_##level##_linear_pagetable(                                             \
>>> -    level##_pgentry_t pde, unsigned long pde_pfn, struct domain *d)         \
>>> +    level##_pgentry_t pde, mfn_t pde_pfn, struct domain *d)                 \
>>
>> Perhaps better pde_mfn then here, ...
>>
>>>  {                                                                           \
>>>      unsigned long x, y;                                                     \
>>> -    unsigned long pfn;                                                      \
>>> +    mfn_t pfn;                                                              \
>>
>> ... pfn here, and likewise elsewhere?
> 
> Sorry, I get that you mean s/pde_pfn/pde_mfn/g; for the argument to this
> function; but what do you want done with the `pfn` local variable?  Did
> you mean to suggest `mfn` here as well?

Oops, yes, of course I did. Sorry.

Jan


* Re: [Xen-devel] [PATCH 0/3] Post-299 cleanups
  2019-12-13 17:37 [Xen-devel] [PATCH 0/3] Post-299 cleanups George Dunlap
                   ` (2 preceding siblings ...)
  2019-12-13 17:37 ` [Xen-devel] [PATCH 3/3] x86/mm: More descriptive names for page de/validation functions George Dunlap
@ 2019-12-16 13:24 ` Andrew Cooper
  3 siblings, 0 replies; 11+ messages in thread
From: Andrew Cooper @ 2019-12-16 13:24 UTC (permalink / raw)
  To: George Dunlap, xen-devel; +Cc: Jan Beulich

On 13/12/2019 17:37, George Dunlap wrote:
> This series implements a number of cleanups to make the code simpler
> and easier to follow.  No functional changes intended.
>
> George Dunlap (3):
>   x86/mm: Use a more descriptive name for pagetable mfns
>   x86/mm: Use mfn_t in type get / put call tree
>   x86/mm: More descriptive names for page de/validation functions

Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>

