* [PATCH v2 0/2] x86/paging: address observation made while working on XSA-387
@ 2021-12-06 13:21 Jan Beulich
  2021-12-06 13:25 ` [PATCH v2 1/2] x86/paging: tidy paging_mfn_is_dirty() Jan Beulich
From: Jan Beulich @ 2021-12-06 13:21 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Wei Liu, Roger Pau Monné, George Dunlap

1: tidy paging_mfn_is_dirty()
2: replace most mfn_valid() in log-dirty handling

Jan




* [PATCH v2 1/2] x86/paging: tidy paging_mfn_is_dirty()
  2021-12-06 13:21 [PATCH v2 0/2] x86/paging: address observation made while working on XSA-387 Jan Beulich
@ 2021-12-06 13:25 ` Jan Beulich
  2022-01-11 16:59   ` Andrew Cooper
  2021-12-06 13:25 ` [PATCH v2 2/2] x86/paging: replace most mfn_valid() in log-dirty handling Jan Beulich
  2022-01-04  9:55 ` Ping: [PATCH v2 0/2] x86/paging: address observation made while working on XSA-387 Jan Beulich
From: Jan Beulich @ 2021-12-06 13:25 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Wei Liu, Roger Pau Monné, George Dunlap

The function returns a boolean indicator, so make it return bool. Also
constify its struct domain parameter, even though this requires
adjusting mm_locked_by_me() as well. Furthermore, since the function is
used by shadow code only, compile it only under CONFIG_SHADOW_PAGING.

Since mm_locked_by_me() needs touching anyway, also switch its return
type to bool.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Change return type of mm_locked_by_me().
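
Note (illustrative only): the const propagation works roughly as in the
stand-alone sketch below. The types and the my_cpu parameter are
simplified, hypothetical stand-ins (the real code reads
current->processor), not the actual Xen definitions.

#include <stdbool.h>

struct mm_lock { int recurse_cpu; };
struct domain { struct mm_lock paging_lock; };

static bool mm_locked_by_me(const struct mm_lock *l, int my_cpu)
{
    /* Purely a read, so taking a pointer-to-const costs nothing. */
    return l->recurse_cpu == my_cpu;
}

static bool paging_locked_by_me(const struct domain *d, int my_cpu)
{
    /*
     * With d const-qualified, &d->paging_lock is a pointer-to-const;
     * without the matching const in mm_locked_by_me() this call would
     * not compile.
     */
    return mm_locked_by_me(&d->paging_lock, my_cpu);
}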

--- a/xen/arch/x86/mm/mm-locks.h
+++ b/xen/arch/x86/mm/mm-locks.h
@@ -40,7 +40,7 @@ static inline void mm_lock_init(mm_lock_
     l->unlock_level = 0;
 }
 
-static inline int mm_locked_by_me(mm_lock_t *l)
+static inline bool mm_locked_by_me(const mm_lock_t *l)
 {
     return (l->lock.recurse_cpu == current->processor);
 }
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -351,14 +351,14 @@ void paging_mark_dirty(struct domain *d,
     paging_mark_pfn_dirty(d, pfn);
 }
 
-
+#ifdef CONFIG_SHADOW_PAGING
 /* Is this guest page dirty? */
-int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)
+bool paging_mfn_is_dirty(const struct domain *d, mfn_t gmfn)
 {
     pfn_t pfn;
     mfn_t mfn, *l4, *l3, *l2;
     unsigned long *l1;
-    int rv;
+    bool dirty;
 
     ASSERT(paging_locked_by_me(d));
     ASSERT(paging_mode_log_dirty(d));
@@ -367,36 +367,37 @@ int paging_mfn_is_dirty(struct domain *d
     pfn = _pfn(get_gpfn_from_mfn(mfn_x(gmfn)));
     /* Invalid pages can't be dirty. */
     if ( unlikely(!VALID_M2P(pfn_x(pfn))) )
-        return 0;
+        return false;
 
     mfn = d->arch.paging.log_dirty.top;
     if ( !mfn_valid(mfn) )
-        return 0;
+        return false;
 
     l4 = map_domain_page(mfn);
     mfn = l4[L4_LOGDIRTY_IDX(pfn)];
     unmap_domain_page(l4);
     if ( !mfn_valid(mfn) )
-        return 0;
+        return false;
 
     l3 = map_domain_page(mfn);
     mfn = l3[L3_LOGDIRTY_IDX(pfn)];
     unmap_domain_page(l3);
     if ( !mfn_valid(mfn) )
-        return 0;
+        return false;
 
     l2 = map_domain_page(mfn);
     mfn = l2[L2_LOGDIRTY_IDX(pfn)];
     unmap_domain_page(l2);
     if ( !mfn_valid(mfn) )
-        return 0;
+        return false;
 
     l1 = map_domain_page(mfn);
-    rv = test_bit(L1_LOGDIRTY_IDX(pfn), l1);
+    dirty = test_bit(L1_LOGDIRTY_IDX(pfn), l1);
     unmap_domain_page(l1);
-    return rv;
-}
 
+    return dirty;
+}
+#endif
 
 /* Read a domain's log-dirty bitmap and stats.  If the operation is a CLEAN,
  * clear the bitmap and stats as well. */
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -173,7 +173,7 @@ void paging_mark_pfn_dirty(struct domain
 
 /* is this guest page dirty? 
  * This is called from inside paging code, with the paging lock held. */
-int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn);
+bool paging_mfn_is_dirty(const struct domain *d, mfn_t gmfn);
 
 /*
  * Log-dirty radix tree indexing:




* [PATCH v2 2/2] x86/paging: replace most mfn_valid() in log-dirty handling
  2021-12-06 13:21 [PATCH v2 0/2] x86/paging: address observation made while working on XSA-387 Jan Beulich
  2021-12-06 13:25 ` [PATCH v2 1/2] x86/paging: tidy paging_mfn_is_dirty() Jan Beulich
@ 2021-12-06 13:25 ` Jan Beulich
  2022-01-11 16:59   ` Andrew Cooper
  2022-01-04  9:55 ` Ping: [PATCH v2 0/2] x86/paging: address observation made while working on XSA-387 Jan Beulich
From: Jan Beulich @ 2021-12-06 13:25 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Wei Liu, Roger Pau Monné, George Dunlap

Top-level and intermediate table entries get explicitly set to
INVALID_MFN when unallocated. There is therefore no need to use the
more expensive mfn_valid() when checking for that sentinel.

Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: New.
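
Note (illustrative only): the cost difference can be seen in the rough
stand-alone sketch below, using simplified, hypothetical definitions
rather than the real Xen ones (whose mfn_valid() consults further
global state, e.g. PDX validity information on x86).

#include <stdbool.h>
#include <stdint.h>

typedef struct { uint64_t m; } mfn_t;
#define INVALID_MFN ((mfn_t){ UINT64_MAX })

static uint64_t max_page;  /* stand-in for the host's page count */

static bool mfn_eq(mfn_t a, mfn_t b)
{
    return a.m == b.m;  /* a single integer compare */
}

static bool mfn_valid(mfn_t mfn)
{
    /*
     * Loads a (potentially cache-cold) global, and the real check does
     * more work still.  INVALID_MFN also fails this check, which is why
     * the two tests were interchangeable for these table entries.
     */
    return mfn.m < max_page;
}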

--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -74,7 +74,7 @@ static mfn_t paging_new_log_dirty_leaf(s
 {
     mfn_t mfn = paging_new_log_dirty_page(d);
 
-    if ( mfn_valid(mfn) )
+    if ( !mfn_eq(mfn, INVALID_MFN) )
         clear_domain_page(mfn);
 
     return mfn;
@@ -84,7 +84,8 @@ static mfn_t paging_new_log_dirty_leaf(s
 static mfn_t paging_new_log_dirty_node(struct domain *d)
 {
     mfn_t mfn = paging_new_log_dirty_page(d);
-    if ( mfn_valid(mfn) )
+
+    if ( !mfn_eq(mfn, INVALID_MFN) )
     {
         int i;
         mfn_t *node = map_domain_page(mfn);
@@ -98,7 +99,7 @@ static mfn_t paging_new_log_dirty_node(s
 /* get the top of the log-dirty bitmap trie */
 static mfn_t *paging_map_log_dirty_bitmap(struct domain *d)
 {
-    if ( likely(mfn_valid(d->arch.paging.log_dirty.top)) )
+    if ( likely(!mfn_eq(d->arch.paging.log_dirty.top, INVALID_MFN)) )
         return map_domain_page(d->arch.paging.log_dirty.top);
     return NULL;
 }
@@ -116,7 +117,7 @@ static int paging_free_log_dirty_bitmap(
 
     paging_lock(d);
 
-    if ( !mfn_valid(d->arch.paging.log_dirty.top) )
+    if ( mfn_eq(d->arch.paging.log_dirty.top, INVALID_MFN) )
     {
         paging_unlock(d);
         return 0;
@@ -143,20 +144,20 @@ static int paging_free_log_dirty_bitmap(
 
     for ( ; i4 < LOGDIRTY_NODE_ENTRIES; i4++, i3 = 0 )
     {
-        if ( !mfn_valid(l4[i4]) )
+        if ( mfn_eq(l4[i4], INVALID_MFN) )
             continue;
 
         l3 = map_domain_page(l4[i4]);
 
         for ( ; i3 < LOGDIRTY_NODE_ENTRIES; i3++ )
         {
-            if ( !mfn_valid(l3[i3]) )
+            if ( mfn_eq(l3[i3], INVALID_MFN) )
                 continue;
 
             l2 = map_domain_page(l3[i3]);
 
             for ( i2 = 0; i2 < LOGDIRTY_NODE_ENTRIES; i2++ )
-                if ( mfn_valid(l2[i2]) )
+                if ( !mfn_eq(l2[i2], INVALID_MFN) )
                     paging_free_log_dirty_page(d, l2[i2]);
 
             unmap_domain_page(l2);
@@ -288,35 +289,35 @@ void paging_mark_pfn_dirty(struct domain
     /* Recursive: this is called from inside the shadow code */
     paging_lock_recursive(d);
 
-    if ( unlikely(!mfn_valid(d->arch.paging.log_dirty.top)) ) 
+    if ( unlikely(mfn_eq(d->arch.paging.log_dirty.top, INVALID_MFN)) )
     {
          d->arch.paging.log_dirty.top = paging_new_log_dirty_node(d);
-         if ( unlikely(!mfn_valid(d->arch.paging.log_dirty.top)) )
+         if ( unlikely(mfn_eq(d->arch.paging.log_dirty.top, INVALID_MFN)) )
              goto out;
     }
 
     l4 = paging_map_log_dirty_bitmap(d);
     mfn = l4[i4];
-    if ( !mfn_valid(mfn) )
+    if ( mfn_eq(mfn, INVALID_MFN) )
         l4[i4] = mfn = paging_new_log_dirty_node(d);
     unmap_domain_page(l4);
-    if ( !mfn_valid(mfn) )
+    if ( mfn_eq(mfn, INVALID_MFN) )
         goto out;
 
     l3 = map_domain_page(mfn);
     mfn = l3[i3];
-    if ( !mfn_valid(mfn) )
+    if ( mfn_eq(mfn, INVALID_MFN) )
         l3[i3] = mfn = paging_new_log_dirty_node(d);
     unmap_domain_page(l3);
-    if ( !mfn_valid(mfn) )
+    if ( mfn_eq(mfn, INVALID_MFN) )
         goto out;
 
     l2 = map_domain_page(mfn);
     mfn = l2[i2];
-    if ( !mfn_valid(mfn) )
+    if ( mfn_eq(mfn, INVALID_MFN) )
         l2[i2] = mfn = paging_new_log_dirty_leaf(d);
     unmap_domain_page(l2);
-    if ( !mfn_valid(mfn) )
+    if ( mfn_eq(mfn, INVALID_MFN) )
         goto out;
 
     l1 = map_domain_page(mfn);
@@ -370,25 +371,25 @@ bool paging_mfn_is_dirty(const struct do
         return false;
 
     mfn = d->arch.paging.log_dirty.top;
-    if ( !mfn_valid(mfn) )
+    if ( mfn_eq(mfn, INVALID_MFN) )
         return false;
 
     l4 = map_domain_page(mfn);
     mfn = l4[L4_LOGDIRTY_IDX(pfn)];
     unmap_domain_page(l4);
-    if ( !mfn_valid(mfn) )
+    if ( mfn_eq(mfn, INVALID_MFN) )
         return false;
 
     l3 = map_domain_page(mfn);
     mfn = l3[L3_LOGDIRTY_IDX(pfn)];
     unmap_domain_page(l3);
-    if ( !mfn_valid(mfn) )
+    if ( mfn_eq(mfn, INVALID_MFN) )
         return false;
 
     l2 = map_domain_page(mfn);
     mfn = l2[L2_LOGDIRTY_IDX(pfn)];
     unmap_domain_page(l2);
-    if ( !mfn_valid(mfn) )
+    if ( mfn_eq(mfn, INVALID_MFN) )
         return false;
 
     l1 = map_domain_page(mfn);
@@ -477,17 +478,18 @@ static int paging_log_dirty_op(struct do
 
     for ( ; (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES); i4++, i3 = 0 )
     {
-        l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(l4[i4]) : NULL;
+        l3 = ((l4 && !mfn_eq(l4[i4], INVALID_MFN)) ?
+              map_domain_page(l4[i4]) : NULL);
         for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES); i3++ )
         {
-            l2 = ((l3 && mfn_valid(l3[i3])) ?
+            l2 = ((l3 && !mfn_eq(l3[i3], INVALID_MFN)) ?
                   map_domain_page(l3[i3]) : NULL);
             for ( i2 = 0;
                   (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES);
                   i2++ )
             {
                 unsigned int bytes = PAGE_SIZE;
-                l1 = ((l2 && mfn_valid(l2[i2])) ?
+                l1 = ((l2 && !mfn_eq(l2[i2], INVALID_MFN)) ?
                       map_domain_page(l2[i2]) : NULL);
                 if ( unlikely(((sc->pages - pages + 7) >> 3) < bytes) )
                     bytes = (unsigned int)((sc->pages - pages + 7) >> 3);




* Ping: [PATCH v2 0/2] x86/paging: address observation made while working on XSA-387
  2021-12-06 13:21 [PATCH v2 0/2] x86/paging: address observation made while working on XSA-387 Jan Beulich
  2021-12-06 13:25 ` [PATCH v2 1/2] x86/paging: tidy paging_mfn_is_dirty() Jan Beulich
  2021-12-06 13:25 ` [PATCH v2 2/2] x86/paging: replace most mfn_valid() in log-dirty handling Jan Beulich
@ 2022-01-04  9:55 ` Jan Beulich
From: Jan Beulich @ 2022-01-04  9:55 UTC (permalink / raw)
  To: Andrew Cooper, Roger Pau Monné; +Cc: Wei Liu, George Dunlap, xen-devel

On 06.12.2021 14:21, Jan Beulich wrote:
> 1: tidy paging_mfn_is_dirty()
> 2: replace most mfn_valid() in log-dirty handling

Any chance of getting an ack or otherwise here?

Thanks, Jan




* Re: [PATCH v2 1/2] x86/paging: tidy paging_mfn_is_dirty()
  2021-12-06 13:25 ` [PATCH v2 1/2] x86/paging: tidy paging_mfn_is_dirty() Jan Beulich
@ 2022-01-11 16:59   ` Andrew Cooper
From: Andrew Cooper @ 2022-01-11 16:59 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: Andrew Cooper, Wei Liu, Roger Pau Monné, George Dunlap

On 06/12/2021 13:25, Jan Beulich wrote:
> The function returns a boolean indicator, so make it return bool. Also
> constify its struct domain parameter, even though this requires
> adjusting mm_locked_by_me() as well. Furthermore, since the function is
> used by shadow code only, compile it only under CONFIG_SHADOW_PAGING.
>
> Since mm_locked_by_me() needs touching anyway, also switch its return
> type to bool.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>



* Re: [PATCH v2 2/2] x86/paging: replace most mfn_valid() in log-dirty handling
  2021-12-06 13:25 ` [PATCH v2 2/2] x86/paging: replace most mfn_valid() in log-dirty handling Jan Beulich
@ 2022-01-11 16:59   ` Andrew Cooper
From: Andrew Cooper @ 2022-01-11 16:59 UTC (permalink / raw)
  To: Jan Beulich, xen-devel
  Cc: Andrew Cooper, Wei Liu, Roger Pau Monné, George Dunlap

On 06/12/2021 13:25, Jan Beulich wrote:
> Top-level and intermediate table entries get explicitly set to
> INVALID_MFN when unallocated. There is therefore no need to use the
> more expensive mfn_valid() when checking for that sentinel.
>
> Suggested-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>


