All of lore.kernel.org
 help / color / mirror / Atom feed
From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
	Paul Durrant <paul.durrant@citrix.com>,
	Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v2 REPOST 08/12] x86/hvm/ioreq: move is_default into struct hvm_ioreq_server
Date: Tue, 22 Aug 2017 15:51:02 +0100	[thread overview]
Message-ID: <20170822145107.6877-9-paul.durrant@citrix.com> (raw)
In-Reply-To: <20170822145107.6877-1-paul.durrant@citrix.com>

Legacy emulators use the 'default' IOREQ server, which has slightly
different semantics from other, explicitly created, IOREQ servers.

Because of this, most of the initialization and teardown code needs to
know whether the server is the default one or not. This is currently
achieved by passing an is_default boolean argument to the functions in
question. That argument can be avoided by instead adding an is_default
field to the hvm_ioreq_server structure, since the structure is already
passed as an argument to all the relevant functions.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
 xen/arch/x86/hvm/ioreq.c         | 80 ++++++++++++++++++----------------------
 xen/include/asm-x86/hvm/domain.h |  1 +
 2 files changed, 36 insertions(+), 45 deletions(-)

diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 5e01e1a6d2..5737082238 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -302,7 +302,7 @@ static void hvm_update_ioreq_evtchn(struct hvm_ioreq_server *s,
 }
 
 static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
-                                     bool is_default, struct vcpu *v)
+                                     struct vcpu *v)
 {
     struct hvm_ioreq_vcpu *sv;
     int rc;
@@ -331,7 +331,7 @@ static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
             goto fail3;
 
         s->bufioreq_evtchn = rc;
-        if ( is_default )
+        if ( s->is_default )
             d->arch.hvm_domain.params[HVM_PARAM_BUFIOREQ_EVTCHN] =
                 s->bufioreq_evtchn;
     }
@@ -431,7 +431,6 @@ static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s,
 }
 
 static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
-                                        bool is_default,
                                         bool handle_bufioreq)
 {
     struct domain *d = s->domain;
@@ -439,7 +438,7 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
     unsigned long bufioreq_gfn = gfn_x(INVALID_GFN);
     int rc;
 
-    if ( is_default )
+    if ( s->is_default )
     {
         /*
          * The default ioreq server must handle buffered ioreqs, for
@@ -468,8 +467,7 @@ static int hvm_ioreq_server_setup_pages(struct hvm_ioreq_server *s,
     return rc;
 }
 
-static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
-                                         bool is_default)
+static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s)
 {
     struct domain *d = s->domain;
     bool handle_bufioreq = !!s->bufioreq.va;
@@ -479,7 +477,7 @@ static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
 
     hvm_unmap_ioreq_page(s, false);
 
-    if ( !is_default )
+    if ( !s->is_default )
     {
         if ( handle_bufioreq )
             hvm_free_ioreq_gfn(d, s->bufioreq.gfn);
@@ -488,25 +486,23 @@ static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s,
     }
 }
 
-static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s,
-                                            bool is_default)
+static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s)
 {
     unsigned int i;
 
-    if ( is_default )
+    if ( s->is_default )
         return;
 
     for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
         rangeset_destroy(s->range[i]);
 }
 
-static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
-                                            bool is_default)
+static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s)
 {
     unsigned int i;
     int rc;
 
-    if ( is_default )
+    if ( s->is_default )
         goto done;
 
     for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
@@ -537,13 +533,12 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
     return 0;
 
  fail:
-    hvm_ioreq_server_free_rangesets(s, false);
+    hvm_ioreq_server_free_rangesets(s);
 
     return rc;
 }
 
-static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
-                                    bool is_default)
+static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s)
 {
     struct domain *d = s->domain;
     struct hvm_ioreq_vcpu *sv;
@@ -554,7 +549,7 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
     if ( s->enabled )
         goto done;
 
-    if ( !is_default )
+    if ( !s->is_default )
     {
         hvm_remove_ioreq_gfn(d, &s->ioreq);
 
@@ -573,8 +568,7 @@ static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s,
     spin_unlock(&s->lock);
 }
 
-static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
-                                     bool is_default)
+static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s)
 {
     struct domain *d = s->domain;
     bool handle_bufioreq = !!s->bufioreq.va;
@@ -584,7 +578,7 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
     if ( !s->enabled )
         goto done;
 
-    if ( !is_default )
+    if ( !s->is_default )
     {
         if ( handle_bufioreq )
             hvm_add_ioreq_gfn(d, &s->bufioreq);
@@ -600,8 +594,7 @@ static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s,
 
 static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
                                  struct domain *d, domid_t domid,
-                                 bool is_default, int bufioreq_handling,
-                                 ioservid_t id)
+                                 int bufioreq_handling, ioservid_t id)
 {
     struct vcpu *v;
     int rc;
@@ -614,7 +607,7 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
     INIT_LIST_HEAD(&s->ioreq_vcpu_list);
     spin_lock_init(&s->bufioreq_lock);
 
-    rc = hvm_ioreq_server_alloc_rangesets(s, is_default);
+    rc = hvm_ioreq_server_alloc_rangesets(s);
     if ( rc )
         return rc;
 
@@ -622,13 +615,13 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
         s->bufioreq_atomic = true;
 
     rc = hvm_ioreq_server_setup_pages(
-             s, is_default, bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF);
+             s, bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF);
     if ( rc )
         goto fail_map;
 
     for_each_vcpu ( d, v )
     {
-        rc = hvm_ioreq_server_add_vcpu(s, is_default, v);
+        rc = hvm_ioreq_server_add_vcpu(s, v);
         if ( rc )
             goto fail_add;
     }
@@ -637,21 +630,20 @@ static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
 
  fail_add:
     hvm_ioreq_server_remove_all_vcpus(s);
-    hvm_ioreq_server_unmap_pages(s, is_default);
+    hvm_ioreq_server_unmap_pages(s);
 
  fail_map:
-    hvm_ioreq_server_free_rangesets(s, is_default);
+    hvm_ioreq_server_free_rangesets(s);
 
     return rc;
 }
 
-static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s,
-                                    bool is_default)
+static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s)
 {
     ASSERT(!s->enabled);
     hvm_ioreq_server_remove_all_vcpus(s);
-    hvm_ioreq_server_unmap_pages(s, is_default);
-    hvm_ioreq_server_free_rangesets(s, is_default);
+    hvm_ioreq_server_unmap_pages(s);
+    hvm_ioreq_server_free_rangesets(s);
 }
 
 static ioservid_t next_ioservid(struct domain *d)
@@ -695,6 +687,8 @@ int hvm_create_ioreq_server(struct domain *d, domid_t domid,
     if ( !s )
         goto fail1;
 
+    s->is_default = is_default;
+
     domain_pause(d);
     spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
 
@@ -702,7 +696,7 @@ int hvm_create_ioreq_server(struct domain *d, domid_t domid,
     if ( is_default && d->arch.hvm_domain.default_ioreq_server != NULL )
         goto fail2;
 
-    rc = hvm_ioreq_server_init(s, d, domid, is_default, bufioreq_handling,
+    rc = hvm_ioreq_server_init(s, d, domid, bufioreq_handling,
                                next_ioservid(d));
     if ( rc )
         goto fail3;
@@ -713,7 +707,7 @@ int hvm_create_ioreq_server(struct domain *d, domid_t domid,
     if ( is_default )
     {
         d->arch.hvm_domain.default_ioreq_server = s;
-        hvm_ioreq_server_enable(s, true);
+        hvm_ioreq_server_enable(s);
     }
 
     if ( id )
@@ -756,11 +750,11 @@ int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
 
         p2m_set_ioreq_server(d, 0, s);
 
-        hvm_ioreq_server_disable(s, false);
+        hvm_ioreq_server_disable(s);
 
         list_del(&s->list_entry);
 
-        hvm_ioreq_server_deinit(s, false);
+        hvm_ioreq_server_deinit(s);
 
         domain_unpause(d);
 
@@ -992,9 +986,9 @@ int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
         domain_pause(d);
 
         if ( enabled )
-            hvm_ioreq_server_enable(s, false);
+            hvm_ioreq_server_enable(s);
         else
-            hvm_ioreq_server_disable(s, false);
+            hvm_ioreq_server_disable(s);
 
         domain_unpause(d);
 
@@ -1017,9 +1011,7 @@ int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
                           &d->arch.hvm_domain.ioreq_server.list,
                           list_entry )
     {
-        bool is_default = (s == d->arch.hvm_domain.default_ioreq_server);
-
-        rc = hvm_ioreq_server_add_vcpu(s, is_default, v);
+        rc = hvm_ioreq_server_add_vcpu(s, v);
         if ( rc )
             goto fail;
     }
@@ -1066,16 +1058,14 @@ void hvm_destroy_all_ioreq_servers(struct domain *d)
                                &d->arch.hvm_domain.ioreq_server.list,
                                list_entry )
     {
-        bool is_default = (s == d->arch.hvm_domain.default_ioreq_server);
-
-        hvm_ioreq_server_disable(s, is_default);
+        hvm_ioreq_server_disable(s);
 
-        if ( is_default )
+        if ( s->is_default )
             d->arch.hvm_domain.default_ioreq_server = NULL;
 
         list_del(&s->list_entry);
 
-        hvm_ioreq_server_deinit(s, is_default);
+        hvm_ioreq_server_deinit(s);
 
         xfree(s);
     }
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 7f128c05ff..16344d173b 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -71,6 +71,7 @@ struct hvm_ioreq_server {
     struct rangeset        *range[NR_IO_RANGE_TYPES];
     bool                   enabled;
     bool                   bufioreq_atomic;
+    bool                   is_default;
 };
 
 /*
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

  parent reply	other threads:[~2017-08-22 14:51 UTC|newest]

Thread overview: 56+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-08-22 14:50 [PATCH v2 REPOST 00/12] x86: guest resource mapping Paul Durrant
2017-08-22 14:50 ` [PATCH v2 REPOST 01/12] [x86|arm]: remove code duplication Paul Durrant
2017-08-24 14:12   ` Jan Beulich
2017-08-24 14:16     ` Paul Durrant
2017-08-22 14:50 ` [PATCH v2 REPOST 02/12] x86/mm: allow a privileged PV domain to map guest mfns Paul Durrant
2017-08-24 16:33   ` Wei Liu
2017-08-25 10:05     ` Paul Durrant
2017-08-28 14:38       ` Wei Liu
2017-08-29  8:37         ` Paul Durrant
2017-08-22 14:50 ` [PATCH v2 REPOST 03/12] x86/mm: add HYPERVISOR_memory_op to acquire guest resources Paul Durrant
2017-08-28 15:01   ` Wei Liu
2017-08-29  8:32     ` Paul Durrant
2017-08-29  8:59       ` Jan Beulich
2017-08-29  9:13         ` Paul Durrant
2017-08-29  9:27           ` Jan Beulich
2017-08-29  9:31             ` Paul Durrant
2017-08-29  9:38               ` Jan Beulich
2017-08-29 11:16   ` George Dunlap
2017-08-29 11:19     ` Paul Durrant
2017-08-22 14:50 ` [PATCH v2 REPOST 04/12] tools/libxenforeignmemory: add support for resource mapping Paul Durrant
2017-08-24 15:52   ` Roger Pau Monné
2017-08-24 15:58     ` Paul Durrant
2017-08-22 14:50 ` [PATCH v2 REPOST 05/12] tools/libxenctrl: use new xenforeignmemory API to seed grant table Paul Durrant
2017-08-24 16:02   ` Roger Pau Monné
2017-08-24 16:09     ` Paul Durrant
2017-08-28 15:04       ` Wei Liu
2017-08-22 14:51 ` [PATCH v2 REPOST 06/12] x86/hvm/ioreq: rename .*pfn and .*gmfn to .*gfn Paul Durrant
2017-08-24 16:06   ` Roger Pau Monné
2017-08-28 15:01   ` Wei Liu
2017-08-22 14:51 ` [PATCH v2 REPOST 07/12] x86/hvm/ioreq: use bool rather than bool_t Paul Durrant
2017-08-24 16:11   ` Roger Pau Monné
2017-08-22 14:51 ` Paul Durrant [this message]
2017-08-24 16:21   ` [PATCH v2 REPOST 08/12] x86/hvm/ioreq: move is_default into struct hvm_ioreq_server Roger Pau Monné
2017-08-24 16:31     ` Paul Durrant
2017-08-22 14:51 ` [PATCH v2 REPOST 09/12] x86/hvm/ioreq: simplify code and use consistent naming Paul Durrant
2017-08-24 17:02   ` Roger Pau Monné
2017-08-25 10:18     ` Paul Durrant
2017-08-22 14:51 ` [PATCH v2 REPOST 10/12] x86/hvm/ioreq: use gfn_t in struct hvm_ioreq_page Paul Durrant
2017-08-24 17:05   ` Roger Pau Monné
2017-08-22 14:51 ` [PATCH v2 REPOST 11/12] x86/hvm/ioreq: defer mapping gfns until they are actually requsted Paul Durrant
2017-08-24 17:21   ` Roger Pau Monné
2017-08-25  9:52     ` Paul Durrant
2017-08-28 15:08   ` Wei Liu
2017-08-29  8:51     ` Paul Durrant
2017-08-22 14:51 ` [PATCH v2 REPOST 12/12] x86/hvm/ioreq: add a new mappable resource type Paul Durrant
2017-08-25  9:32   ` Roger Pau Monné
2017-08-25  9:46     ` Paul Durrant
2017-08-25  9:53       ` Roger Pau Monne
2017-08-25  9:58         ` Paul Durrant
2017-08-29 11:36       ` George Dunlap
2017-08-29 13:40       ` George Dunlap
2017-08-29 14:10         ` Paul Durrant
2017-08-29 14:26           ` George Dunlap
2017-08-29 14:31             ` Paul Durrant
2017-08-29 14:38               ` George Dunlap
2017-08-29 14:49                 ` Paul Durrant

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20170822145107.6877-9-paul.durrant@citrix.com \
    --to=paul.durrant@citrix.com \
    --cc=andrew.cooper3@citrix.com \
    --cc=jbeulich@suse.com \
    --cc=xen-devel@lists.xenproject.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.