From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xen.org
Cc: Paul Durrant <paul.durrant@citrix.com>, Keir Fraser <keir@xen.org>
Subject: [PATCH v7 09/15] x86/hvm: only call hvm_io_assist() from hvm_wait_for_io()
Date: Thu, 9 Jul 2015 14:10:49 +0100
Message-ID: <1436447455-11524-10-git-send-email-paul.durrant@citrix.com>
In-Reply-To: <1436447455-11524-1-git-send-email-paul.durrant@citrix.com>

By removing the calls to hvm_io_assist() in hvmemul_do_io() (replaced
by a single assignment) and in hvm_complete_assist_req() (which is
itself removed, being replaced by a call to hvm_process_io_intercept()
with a suitable set of null ops), hvm_io_assist() can be moved into
hvm.c and made static (and hence become a candidate for inlining).
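
For illustration only, here are the relevant new pieces from the hunks
below collected in one place (nothing here is additional to the patch
itself): accesses with no backing device model are now folded into the
generic intercept machinery via a handler whose ops ignore writes and
return all-ones on reads.

    static int null_read(const struct hvm_io_handler *io_handler,
                         uint64_t addr, uint32_t size, uint64_t *data)
    {
        *data = ~0ul;            /* reads of unbacked ranges yield all-ones */
        return X86EMUL_OKAY;
    }

    static const struct hvm_io_ops null_ops = {
        .read = null_read,
        .write = null_write      /* discards the data, returns X86EMUL_OKAY */
    };

    /* In hvmemul_do_io(), when no suitable ioreq server is found: */
    rc = hvm_process_io_intercept(&null_handler, &p);
    if ( rc == X86EMUL_OKAY )
        vio->io_data = p.data;
    vio->io_state = HVMIO_none;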

The calls to msix_write_completion() and vcpu_end_shutdown_deferral()
are also made unconditional: the ioreq state is always STATE_IOREQ_NONE
at the end of hvm_io_assist(), so the test was pointless. These calls
are only relevant when the emulation has been handled externally, which
is now always the case.
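
To see why the test was redundant, compare the top and bottom of the
(removed) io.c implementation reproduced in the hunk below; this is
just an excerpt, not additional code:

    p->state = STATE_IOREQ_NONE;

    /* ... switch ( io_state ) ... */

    if ( p->state == STATE_IOREQ_NONE )      /* always true by this point */
    {
        msix_write_completion(curr);
        vcpu_end_shutdown_deferral(curr);
    }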

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Keir Fraser <keir@xen.org>
Acked-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
---

v7:
- No change

v6:
- Added Andrew's reviewed-by

v5:
- Added Jan's acked-by
---
 xen/arch/x86/hvm/emulate.c    |   34 ++++++++++++++++++---
 xen/arch/x86/hvm/hvm.c        |   67 ++++++++++++++++++++++-------------------
 xen/arch/x86/hvm/io.c         |   39 ------------------------
 xen/include/asm-x86/hvm/hvm.h |    1 -
 xen/include/asm-x86/hvm/io.h  |    1 -
 5 files changed, 66 insertions(+), 76 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 53ab3d3..54b9430 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -50,6 +50,32 @@ static void hvmtrace_io_assist(const ioreq_t *p)
     trace_var(event, 0/*!cycles*/, size, buffer);
 }
 
+static int null_read(const struct hvm_io_handler *io_handler,
+                     uint64_t addr,
+                     uint32_t size,
+                     uint64_t *data)
+{
+    *data = ~0ul;
+    return X86EMUL_OKAY;
+}
+
+static int null_write(const struct hvm_io_handler *handler,
+                      uint64_t addr,
+                      uint32_t size,
+                      uint64_t data)
+{
+    return X86EMUL_OKAY;
+}
+
+static const struct hvm_io_ops null_ops = {
+    .read = null_read,
+    .write = null_write
+};
+
+static const struct hvm_io_handler null_handler = {
+    .ops = &null_ops
+};
+
 static int hvmemul_do_io(
     bool_t is_mmio, paddr_t addr, unsigned long reps, unsigned int size,
     uint8_t dir, bool_t df, bool_t data_is_addr, uintptr_t data)
@@ -139,8 +165,7 @@ static int hvmemul_do_io(
     switch ( rc )
     {
     case X86EMUL_OKAY:
-        p.state = STATE_IORESP_READY;
-        hvm_io_assist(&p);
+        vio->io_data = p.data;
         vio->io_state = HVMIO_none;
         break;
     case X86EMUL_UNHANDLEABLE:
@@ -151,8 +176,9 @@ static int hvmemul_do_io(
         /* If there is no suitable backing DM, just ignore accesses */
         if ( !s )
         {
-            hvm_complete_assist_req(&p);
-            rc = X86EMUL_OKAY;
+            rc = hvm_process_io_intercept(&null_handler, &p);
+            if ( rc == X86EMUL_OKAY )
+                vio->io_data = p.data;
             vio->io_state = HVMIO_none;
         }
         else
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 9bdc1d6..7358acf 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -411,6 +411,42 @@ bool_t hvm_io_pending(struct vcpu *v)
     return 0;
 }
 
+static void hvm_io_assist(ioreq_t *p)
+{
+    struct vcpu *curr = current;
+    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+    enum hvm_io_state io_state;
+
+    p->state = STATE_IOREQ_NONE;
+
+    io_state = vio->io_state;
+    vio->io_state = HVMIO_none;
+
+    switch ( io_state )
+    {
+    case HVMIO_awaiting_completion:
+        vio->io_state = HVMIO_completed;
+        vio->io_data = p->data;
+        break;
+    case HVMIO_handle_mmio_awaiting_completion:
+        vio->io_state = HVMIO_completed;
+        vio->io_data = p->data;
+        (void)handle_mmio();
+        break;
+    case HVMIO_handle_pio_awaiting_completion:
+        if ( vio->io_size == 4 ) /* Needs zero extension. */
+            guest_cpu_user_regs()->rax = (uint32_t)p->data;
+        else
+            memcpy(&guest_cpu_user_regs()->rax, &p->data, vio->io_size);
+        break;
+    default:
+        break;
+    }
+
+    msix_write_completion(curr);
+    vcpu_end_shutdown_deferral(curr);
+}
+
 static bool_t hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
 {
     /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
@@ -2663,37 +2699,6 @@ int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *proto_p)
     return X86EMUL_UNHANDLEABLE;
 }
 
-void hvm_complete_assist_req(ioreq_t *p)
-{
-    switch ( p->type )
-    {
-    case IOREQ_TYPE_PCI_CONFIG:
-        ASSERT_UNREACHABLE();
-        break;
-    case IOREQ_TYPE_COPY:
-    case IOREQ_TYPE_PIO:
-        if ( p->dir == IOREQ_READ )
-        {
-            if ( !p->data_is_ptr )
-                p->data = ~0ul;
-            else
-            {
-                int i, step = p->df ? -p->size : p->size;
-                uint32_t data = ~0;
-
-                for ( i = 0; i < p->count; i++ )
-                    hvm_copy_to_guest_phys(p->data + step * i, &data,
-                                           p->size);
-            }
-        }
-        /* FALLTHRU */
-    default:
-        p->state = STATE_IORESP_READY;
-        hvm_io_assist(p);
-        break;
-    }
-}
-
 void hvm_broadcast_assist_req(ioreq_t *p)
 {
     struct domain *d = current->domain;
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index 2c88ddb..fe099d8 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -169,45 +169,6 @@ int handle_pio(uint16_t port, unsigned int size, int dir)
     return 1;
 }
 
-void hvm_io_assist(ioreq_t *p)
-{
-    struct vcpu *curr = current;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
-    enum hvm_io_state io_state;
-
-    p->state = STATE_IOREQ_NONE;
-
-    io_state = vio->io_state;
-    vio->io_state = HVMIO_none;
-
-    switch ( io_state )
-    {
-    case HVMIO_awaiting_completion:
-        vio->io_state = HVMIO_completed;
-        vio->io_data = p->data;
-        break;
-    case HVMIO_handle_mmio_awaiting_completion:
-        vio->io_state = HVMIO_completed;
-        vio->io_data = p->data;
-        (void)handle_mmio();
-        break;
-    case HVMIO_handle_pio_awaiting_completion:
-        if ( vio->io_size == 4 ) /* Needs zero extension. */
-            guest_cpu_user_regs()->rax = (uint32_t)p->data;
-        else
-            memcpy(&guest_cpu_user_regs()->rax, &p->data, vio->io_size);
-        break;
-    default:
-        break;
-    }
-
-    if ( p->state == STATE_IOREQ_NONE )
-    {
-        msix_write_completion(curr);
-        vcpu_end_shutdown_deferral(curr);
-    }
-}
-
 static bool_t dpci_portio_accept(const struct hvm_io_handler *handler,
                                  const ioreq_t *p)
 {
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 1d1fd35..efb8e7d 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -227,7 +227,6 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
                                                  ioreq_t *p);
 int hvm_send_assist_req(struct hvm_ioreq_server *s, ioreq_t *p);
 void hvm_broadcast_assist_req(ioreq_t *p);
-void hvm_complete_assist_req(ioreq_t *p);
 
 void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat);
 int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat);
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index d9e2447..508ec52 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -125,7 +125,6 @@ int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn,
                                  struct npfec);
 int handle_pio(uint16_t port, unsigned int size, int dir);
 void hvm_interrupt_post(struct vcpu *v, int vector, int type);
-void hvm_io_assist(ioreq_t *p);
 void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq,
                   const union vioapic_redir_entry *ent);
 void msix_write_completion(struct vcpu *);
-- 
1.7.10.4
