From: Mukesh Rathor <mukesh.rathor@oracle.com>
To: xen-devel@lists.xenproject.org
Cc: kevin.tian@intel.com, JBeulich@suse.com,
	George.Dunlap@eu.citrix.com, tim@xen.org, eddie.dong@intel.com,
	keir.xen@gmail.com, jun.nakajima@intel.com
Subject: [V2 PATCH 2/3] Rename "set_p2m_entry" to "p2m_set_entry"
Date: Wed,  9 Apr 2014 17:19:01 -0700	[thread overview]
Message-ID: <1397089142-12561-3-git-send-email-mukesh.rathor@oracle.com> (raw)
In-Reply-To: <1397089142-12561-1-git-send-email-mukesh.rathor@oracle.com>

This patch renames set_p2m_entry, defined in arch/x86/mm/p2m.c,
to p2m_set_entry, making it consistent with the other functions
in that file. It also facilitates changing the function signature
to return an appropriate errno for failure cases. This patch doesn't
change any functionality.
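
For illustration only (not part of the diff below), the rename is purely
mechanical; the boolean-style return value is unchanged here, and only the
follow-up patch in this series switches it to errno values. A minimal sketch
of a hypothetical caller, before and after:

    /* Before this patch: */
    if ( !set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2ma) )
        rc = -EINVAL;    /* caller has to pick an error code itself */

    /* After this patch (same semantics, new name): */
    if ( !p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2ma) )
        rc = -EINVAL;

    /* Planned by the follow-up patch: the function returns 0 on success
     * or a negative errno, so callers can simply propagate it. */
    rc = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2ma);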

Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
 xen/arch/x86/mm/hap/nested_hap.c |  2 +-
 xen/arch/x86/mm/mem_sharing.c    |  2 +-
 xen/arch/x86/mm/p2m-pod.c        | 32 +++++++++++++++------------
 xen/arch/x86/mm/p2m.c            | 47 ++++++++++++++++++++++------------------
 xen/include/asm-x86/p2m.h        |  2 +-
 5 files changed, 47 insertions(+), 38 deletions(-)

diff --git a/xen/arch/x86/mm/hap/nested_hap.c b/xen/arch/x86/mm/hap/nested_hap.c
index 38e2327..78701dc 100644
--- a/xen/arch/x86/mm/hap/nested_hap.c
+++ b/xen/arch/x86/mm/hap/nested_hap.c
@@ -124,7 +124,7 @@ nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m,
         gfn = (L2_gpa >> PAGE_SHIFT) & mask;
         mfn = _mfn((L0_gpa >> PAGE_SHIFT) & mask);
 
-        rv = set_p2m_entry(p2m, gfn, mfn, page_order, p2mt, p2ma);
+        rv = p2m_set_entry(p2m, gfn, mfn, page_order, p2mt, p2ma);
     }
 
     p2m_unlock(p2m);
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 237d346..0968681 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1092,7 +1092,7 @@ int mem_sharing_add_to_physmap(struct domain *sd, unsigned long sgfn, shr_handle
         goto err_unlock;
     }
 
-    ret = set_p2m_entry(p2m, cgfn, smfn, PAGE_ORDER_4K, p2m_ram_shared, a);
+    ret = p2m_set_entry(p2m, cgfn, smfn, PAGE_ORDER_4K, p2m_ram_shared, a);
 
     /* Tempted to turn this into an assert */
     if ( !ret )
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index d14565d..3c86255 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -556,7 +556,8 @@ recount:
     {
         /* All PoD: Mark the whole region invalid and tell caller
          * we're done. */
-        set_p2m_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid, p2m->default_access);
+        p2m_set_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid,
+                      p2m->default_access);
         p2m->pod.entry_count-=(1<<order);
         BUG_ON(p2m->pod.entry_count < 0);
         ret = 1;
@@ -589,7 +590,8 @@ recount:
         mfn = p2m->get_entry(p2m, gpfn + i, &t, &a, 0, NULL);
         if ( t == p2m_populate_on_demand )
         {
-            set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid, p2m->default_access);
+            p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid,
+                          p2m->default_access);
             p2m->pod.entry_count--;
             BUG_ON(p2m->pod.entry_count < 0);
             pod--;
@@ -602,7 +604,8 @@ recount:
 
             page = mfn_to_page(mfn);
 
-            set_p2m_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid, p2m->default_access);
+            p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), 0, p2m_invalid,
+                          p2m->default_access);
             set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
 
             p2m_pod_cache_add(p2m, page, 0);
@@ -721,7 +724,7 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
     }
 
     /* Try to remove the page, restoring old mapping if it fails. */
-    set_p2m_entry(p2m, gfn, _mfn(0), PAGE_ORDER_2M,
+    p2m_set_entry(p2m, gfn, _mfn(0), PAGE_ORDER_2M,
                   p2m_populate_on_demand, p2m->default_access);
 
     /* Make none of the MFNs are used elsewhere... for example, mapped
@@ -779,7 +782,7 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
 
 out_reset:
     if ( reset )
-        set_p2m_entry(p2m, gfn, mfn0, 9, type0, p2m->default_access);
+        p2m_set_entry(p2m, gfn, mfn0, 9, type0, p2m->default_access);
     
 out:
     gfn_unlock(p2m, gfn, SUPERPAGE_ORDER);
@@ -837,7 +840,7 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
         }
 
         /* Try to remove the page, restoring old mapping if it fails. */
-        set_p2m_entry(p2m, gfns[i], _mfn(0), PAGE_ORDER_4K,
+        p2m_set_entry(p2m, gfns[i], _mfn(0), PAGE_ORDER_4K,
                       p2m_populate_on_demand, p2m->default_access);
 
         /* See if the page was successfully unmapped.  (Allow one refcount
@@ -847,7 +850,7 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
             unmap_domain_page(map[i]);
             map[i] = NULL;
 
-            set_p2m_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+            p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
                 types[i], p2m->default_access);
 
             continue;
@@ -870,7 +873,7 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
          * check timing.  */
         if ( j < PAGE_SIZE/sizeof(*map[i]) )
         {
-            set_p2m_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
+            p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
                 types[i], p2m->default_access);
         }
         else
@@ -1000,15 +1003,15 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
     {
         pod_unlock(p2m);
         gfn_aligned = (gfn >> order) << order;
-        /* Note that we are supposed to call set_p2m_entry() 512 times to 
+        /* Note that we are supposed to call p2m_set_entry() 512 times to
          * split 1GB into 512 2MB pages here. But We only do once here because
-         * set_p2m_entry() should automatically shatter the 1GB page into 
+         * p2m_set_entry() should automatically shatter the 1GB page into
          * 512 2MB pages. The rest of 511 calls are unnecessary.
          *
          * NOTE: In a fine-grained p2m locking scenario this operation
          * may need to promote its locking from gfn->1g superpage
          */
-        set_p2m_entry(p2m, gfn_aligned, _mfn(0), PAGE_ORDER_2M,
+        p2m_set_entry(p2m, gfn_aligned, _mfn(0), PAGE_ORDER_2M,
                       p2m_populate_on_demand, p2m->default_access);
         return 0;
     }
@@ -1037,7 +1040,8 @@ p2m_pod_demand_populate(struct p2m_domain *p2m, unsigned long gfn,
 
     gfn_aligned = (gfn >> order) << order;
 
-    set_p2m_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw, p2m->default_access);
+    p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
+                  p2m->default_access);
 
     for( i = 0; i < (1UL << order); i++ )
     {
@@ -1091,7 +1095,7 @@ remap_and_retry:
      * need promoting the gfn lock from gfn->2M superpage */
     gfn_aligned = (gfn>>order)<<order;
     for(i=0; i<(1<<order); i++)
-        set_p2m_entry(p2m, gfn_aligned+i, _mfn(0), PAGE_ORDER_4K,
+        p2m_set_entry(p2m, gfn_aligned+i, _mfn(0), PAGE_ORDER_4K,
                       p2m_populate_on_demand, p2m->default_access);
     if ( tb_init_done )
     {
@@ -1146,7 +1150,7 @@ guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
     }
 
     /* Now, actually do the two-way mapping */
-    if ( !set_p2m_entry(p2m, gfn, _mfn(0), order,
+    if ( !p2m_set_entry(p2m, gfn, _mfn(0), order,
                         p2m_populate_on_demand, p2m->default_access) )
         rc = -EINVAL;
     else
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index c38f334..e16f31a 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -309,7 +309,7 @@ struct page_info *get_page_from_gfn_p2m(
 }
 
 
-int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, 
+int p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
                   unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
 {
     struct domain *d = p2m->domain;
@@ -417,7 +417,7 @@ int p2m_alloc_table(struct p2m_domain *p2m)
 
     /* Initialise physmap tables for slot zero. Other code assumes this. */
     p2m->defer_nested_flush = 1;
-    if ( !set_p2m_entry(p2m, 0, _mfn(INVALID_MFN), PAGE_ORDER_4K,
+    if ( !p2m_set_entry(p2m, 0, _mfn(INVALID_MFN), PAGE_ORDER_4K,
                         p2m_invalid, p2m->default_access) )
         goto error;
     p2m->defer_nested_flush = 0;
@@ -496,7 +496,8 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
             ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
         }
     }
-    set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid, p2m->default_access);
+    p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid,
+                  p2m->default_access);
 }
 
 void
@@ -635,7 +636,8 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
     /* Now, actually do the two-way mapping */
     if ( mfn_valid(_mfn(mfn)) ) 
     {
-        if ( !set_p2m_entry(p2m, gfn, _mfn(mfn), page_order, t, p2m->default_access) )
+        if ( !p2m_set_entry(p2m, gfn, _mfn(mfn), page_order, t,
+                            p2m->default_access) )
         {
             rc = -EINVAL;
             goto out; /* Failed to update p2m, bail without updating m2p. */
@@ -650,7 +652,7 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
     {
         gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n",
                  gfn, mfn);
-        if ( !set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, 
+        if ( !p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), page_order,
                             p2m_invalid, p2m->default_access) )
             rc = -EINVAL;
         else
@@ -685,7 +687,7 @@ p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,
 
     mfn = p2m->get_entry(p2m, gfn, &pt, &a, 0, NULL);
     if ( pt == ot )
-        set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt, p2m->default_access);
+        p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, nt, p2m->default_access);
 
     gfn_unlock(p2m, gfn, 0);
 
@@ -726,7 +728,7 @@ void p2m_change_type_range(struct domain *d,
                 order = PAGE_ORDER_4K;
         }
         if ( pt == ot )
-            set_p2m_entry(p2m, gfn, mfn, order, nt, a);
+            p2m_set_entry(p2m, gfn, mfn, order, nt, a);
         gfn += 1UL << order;
         gfn &= -1UL << order;
         if ( !gfn )
@@ -768,11 +770,12 @@ set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
     }
 
     P2M_DEBUG("set mmio %lx %lx\n", gfn, mfn_x(mfn));
-    rc = set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_mmio_direct, p2m->default_access);
+    rc = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_mmio_direct,
+                       p2m->default_access);
     gfn_unlock(p2m, gfn, 0);
     if ( 0 == rc )
         gdprintk(XENLOG_ERR,
-            "set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
+            "set_mmio_p2m_entry: p2m_set_entry failed! mfn=%08lx\n",
             mfn_x(get_gfn_query_unlocked(p2m->domain, gfn, &ot)));
     return rc;
 }
@@ -799,7 +802,8 @@ clear_mmio_p2m_entry(struct domain *d, unsigned long gfn)
             "clear_mmio_p2m_entry: gfn_to_mfn failed! gfn=%08lx\n", gfn);
         goto out;
     }
-    rc = set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K, p2m_invalid, p2m->default_access);
+    rc = p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K, p2m_invalid,
+                       p2m->default_access);
 
 out:
     gfn_unlock(p2m, gfn, 0);
@@ -834,11 +838,12 @@ set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
         set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
 
     P2M_DEBUG("set shared %lx %lx\n", gfn, mfn_x(mfn));
-    rc = set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_shared, p2m->default_access);
+    rc = p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_shared,
+                       p2m->default_access);
     gfn_unlock(p2m, gfn, 0);
     if ( 0 == rc )
         gdprintk(XENLOG_ERR,
-            "set_shared_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
+            "set_shared_p2m_entry: p2m_set_entry failed! mfn=%08lx\n",
             mfn_x(get_gfn_query_unlocked(p2m->domain, gfn, &ot)));
     return rc;
 }
@@ -896,7 +901,7 @@ int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn)
         goto out;
 
     /* Fix p2m entry */
-    set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_out, a);
+    p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_out, a);
     ret = 0;
 
  out:
@@ -961,7 +966,7 @@ int p2m_mem_paging_evict(struct domain *d, unsigned long gfn)
         put_page(page);
 
     /* Remove mapping from p2m table */
-    set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K, p2m_ram_paged, a);
+    p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K, p2m_ram_paged, a);
 
     /* Clear content before returning the page to Xen */
     scrub_one_page(page);
@@ -1070,7 +1075,7 @@ void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
         if ( p2mt == p2m_ram_paging_out )
             req.flags |= MEM_EVENT_FLAG_EVICT_FAIL;
 
-        set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_in, a);
+        p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_in, a);
     }
     gfn_unlock(p2m, gfn, 0);
 
@@ -1170,9 +1175,9 @@ int p2m_mem_paging_prep(struct domain *d, unsigned long gfn, uint64_t buffer)
     /* Make the page already guest-accessible. If the pager still has a
      * pending resume operation, it will be idempotent p2m entry-wise,
      * but will unpause the vcpu */
-    set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, 
-                    paging_mode_log_dirty(d) ? p2m_ram_logdirty : 
-                    p2m_ram_rw, a);
+    p2m_set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
+                  paging_mode_log_dirty(d) ? p2m_ram_logdirty :
+                  p2m_ram_rw, a);
     set_gpfn_from_mfn(mfn_x(mfn), gfn);
 
     if ( !page_extant )
@@ -1222,9 +1227,9 @@ void p2m_mem_paging_resume(struct domain *d)
              * were nominated but not evicted */
             if ( mfn_valid(mfn) && (p2mt == p2m_ram_paging_in) )
             {
-                set_p2m_entry(p2m, rsp.gfn, mfn, PAGE_ORDER_4K, 
-                                paging_mode_log_dirty(d) ? p2m_ram_logdirty : 
-                                p2m_ram_rw, a);
+                p2m_set_entry(p2m, rsp.gfn, mfn, PAGE_ORDER_4K,
+                              paging_mode_log_dirty(d) ? p2m_ram_logdirty :
+                              p2m_ram_rw, a);
                 set_gpfn_from_mfn(mfn_x(mfn), rsp.gfn);
             }
             gfn_unlock(p2m, rsp.gfn, 0);
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index d644f82..0fdfbda 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -593,7 +593,7 @@ void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg);
 
 /* Directly set a p2m entry: only for use by p2m code. Does not need
  * a call to put_gfn afterwards/ */
-int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn, 
+int p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
                   unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma);
 
 /* Set up function pointers for PT implementation: only for use by p2m code */
-- 
1.8.3.1

Thread overview: 8+ messages
2014-04-10  0:18 [V2 PATCH 0/3]: Fixup error codes in some p2m paths Mukesh Rathor
2014-04-10  0:19 ` [V2 PATCH 1/3] Rename public functions in p2m-pt.c Mukesh Rathor
2014-04-10  0:19 ` Mukesh Rathor [this message]
2014-04-10  0:19 ` [V2 PATCH 3/3] P2M error code propogation Mukesh Rathor
2014-04-10  7:23   ` Jan Beulich
2014-04-10  6:57 ` [V2 PATCH 0/3]: Fixup error codes in some p2m paths Jan Beulich
2014-04-10 22:04   ` Mukesh Rathor
2014-04-13 20:43 ` Tim Deegan
