From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
	Paul Durrant <paul.durrant@citrix.com>,
	Keir Fraser <keir@xen.org>, Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v4 10/17] x86/hvm: revert 82ed8716b "fix direct PCI port I/O emulation retry...
Date: Wed, 24 Jun 2015 12:24:42 +0100	[thread overview]
Message-ID: <1435145089-21999-11-git-send-email-paul.durrant@citrix.com> (raw)
In-Reply-To: <1435145089-21999-1-git-send-email-paul.durrant@citrix.com>

...and error handling"

NOTE: A straight reversion was not possible because of subsequent changes
      in the code, so this is at least partially a manual reversion.

By limiting hvmemul_do_io_addr() to reps falling within the pages on which
a reference has already been taken, we can guarantee that calls to
hvm_copy_to/from_guest_phys() will not hit the HVMCOPY_gfn_paged_out
or HVMCOPY_gfn_shared cases. Thus we can remove the retry logic from
the intercept code and simplify it significantly.
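
To make that concrete, here is a condensed sketch of what the read-side
error handling in hvm_process_io_intercept() reduces to once those
guarantees hold (the exact hunks are below; the call's arguments are
elided here, and the comments on the paging/sharing cases are
illustrative):

    switch ( hvm_copy_to_guest_phys(/* ... */) )
    {
    case HVMCOPY_okay:
        break;
    case HVMCOPY_bad_gfn_to_mfn:
        /* Drop the write as real hardware would. */
        continue;
    case HVMCOPY_bad_gva_to_gfn:
    case HVMCOPY_gfn_paged_out: /* page reference held: cannot occur */
    case HVMCOPY_gfn_shared:    /* page reference held: cannot occur */
        ASSERT_UNREACHABLE();
        /* fall through */
    default:
        rc = X86EMUL_UNHANDLEABLE;
    }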

Normally hvmemul_do_io_addr() will only reference a single page at a time.
It will, however, take an extra page reference for I/O spanning a page
boundary.
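
As a hypothetical closed-form equivalent of the per-rep loop added
below (forward copies only; the actual loop also handles df), the
clamp amounts to:

    unsigned long bytes_left = PAGE_SIZE - (ram_gpa & ~PAGE_MASK);
    unsigned long count = min(*reps, bytes_left / size);

    if ( count == 0 )
        count = 1; /* first rep spans into the next page: take a
                    * second page reference */

E.g. with size == 2, a rep count of 8 and ram_gpa == 0x1ffe, count
clamps to 1 (a second rep would cross into the following page); with
ram_gpa == 0x1fff even the first rep straddles the boundary, so the
extra page reference is taken.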

It is still important to know, upon returning from x86_emulate(), whether
the number of reps was reduced, so the mmio_retry flag is retained for
that purpose.
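
Concretely (per the emulate.c and hvm.c hunks below), the flag is set
when the rep count had to be reduced and is consumed on resume:

    rc = hvmemul_do_io(is_mmio, addr, count, size, dir, df, 1,
                       ram_gpa);
    if ( rc == X86EMUL_OKAY )
    {
        v->arch.hvm_vcpu.hvm_io.mmio_retry = (count < *reps);
        *reps = count;
    }

...and in hvm_do_resume():

    if ( vio->mmio_retry )
        (void)handle_mmio();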

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Keir Fraser <keir@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
 xen/arch/x86/hvm/emulate.c     |   86 +++++++++++++++++++++++++++-------------
 xen/arch/x86/hvm/hvm.c         |    4 ++
 xen/arch/x86/hvm/intercept.c   |   52 +++++-------------------
 xen/include/asm-x86/hvm/vcpu.h |    2 +-
 4 files changed, 74 insertions(+), 70 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 4e2fdf1..eefe860 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -52,7 +52,7 @@ static void hvmtrace_io_assist(ioreq_t *p)
 }
 
 static int hvmemul_do_io(
-    bool_t is_mmio, paddr_t addr, unsigned long *reps, unsigned int size,
+    bool_t is_mmio, paddr_t addr, unsigned long reps, unsigned int size,
     uint8_t dir, bool_t df, bool_t data_is_addr, uintptr_t data)
 {
     struct vcpu *curr = current;
@@ -61,6 +61,7 @@ static int hvmemul_do_io(
         .type = is_mmio ? IOREQ_TYPE_COPY : IOREQ_TYPE_PIO,
         .addr = addr,
         .size = size,
+        .count = reps,
         .dir = dir,
         .df = df,
         .data = data,
@@ -126,15 +127,6 @@ static int hvmemul_do_io(
         HVMIO_dispatched : HVMIO_awaiting_completion;
     vio->io_size = size;
 
-    /*
-     * When retrying a repeated string instruction, force exit to guest after
-     * completion of the retried iteration to allow handling of interrupts.
-     */
-    if ( vio->mmio_retrying )
-        *reps = 1;
-
-    p.count = *reps;
-
     if ( dir == IOREQ_WRITE )
     {
         if ( !data_is_addr )
@@ -148,17 +140,9 @@ static int hvmemul_do_io(
     switch ( rc )
     {
     case X86EMUL_OKAY:
-    case X86EMUL_RETRY:
-        *reps = p.count;
         p.state = STATE_IORESP_READY;
-        if ( !vio->mmio_retry )
-        {
-            hvm_io_assist(&p);
-            vio->io_state = HVMIO_none;
-        }
-        else
-            /* Defer hvm_io_assist() invocation to hvm_do_resume(). */
-            vio->io_state = HVMIO_handle_mmio_awaiting_completion;
+        hvm_io_assist(&p);
+        vio->io_state = HVMIO_none;
         break;
     case X86EMUL_UNHANDLEABLE:
     {
@@ -236,7 +220,7 @@ static int hvmemul_do_io_buffer(
 
     BUG_ON(buffer == NULL);
 
-    rc = hvmemul_do_io(is_mmio, addr, reps, size, dir, df, 0,
+    rc = hvmemul_do_io(is_mmio, addr, *reps, size, dir, df, 0,
                        (uintptr_t)buffer);
     if ( rc == X86EMUL_UNHANDLEABLE && dir == IOREQ_READ )
         memset(buffer, 0xff, size);
@@ -288,17 +272,66 @@ static int hvmemul_do_io_addr(
     bool_t is_mmio, paddr_t addr, unsigned long *reps,
     unsigned int size, uint8_t dir, bool_t df, paddr_t ram_gpa)
 {
-    struct page_info *ram_page;
+    struct vcpu *v = current;
+    unsigned long ram_gmfn = paddr_to_pfn(ram_gpa);
+    struct page_info *ram_page[2];
+    int nr_pages = 0;
+    unsigned long count;
     int rc;
 
-    rc = hvmemul_acquire_page(paddr_to_pfn(ram_gpa), &ram_page);
+    rc = hvmemul_acquire_page(ram_gmfn, &ram_page[nr_pages]);
     if ( rc != X86EMUL_OKAY )
-        return rc;
+        goto out;
 
-    rc = hvmemul_do_io(is_mmio, addr, reps, size, dir, df, 1,
+    nr_pages++;
+
+    /* Determine how many reps will fit within this page */
+    for ( count = 0; count < *reps; count++ )
+    {
+        paddr_t start, end;
+
+        if ( df )
+        {
+            start = ram_gpa - count * size;
+            end = ram_gpa + size - 1;
+        }
+        else
+        {
+            start = ram_gpa;
+            end = ram_gpa + (count + 1) * size - 1;
+        }
+
+        if ( paddr_to_pfn(start) != ram_gmfn ||
+             paddr_to_pfn(end) != ram_gmfn )
+            break;
+    }
+
+    if ( count == 0 )
+    {
+        /*
+         * This access must span two pages, so grab a reference to
+         * the next page and do a single rep.
+         */
+        rc = hvmemul_acquire_page(df ? ram_gmfn - 1 : ram_gmfn + 1,
+                                  &ram_page[nr_pages]);
+        if ( rc != X86EMUL_OKAY )
+            goto out;
+
+        nr_pages++;
+        count = 1;
+    }
+
+    rc = hvmemul_do_io(is_mmio, addr, count, size, dir, df, 1,
                        ram_gpa);
+    if ( rc == X86EMUL_OKAY )
+    {
+        v->arch.hvm_vcpu.hvm_io.mmio_retry = (count < *reps);
+        *reps = count;
+    }
 
-    hvmemul_release_page(ram_page);
+ out:
+    while ( --nr_pages >= 0 )
+        hvmemul_release_page(ram_page[nr_pages]);
 
     return rc;
 }
@@ -1550,7 +1583,6 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
     }
 
     hvmemul_ctxt->exn_pending = 0;
-    vio->mmio_retrying = vio->mmio_retry;
     vio->mmio_retry = 0;
 
     if ( cpu_has_vmx )
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index f8486f4..626c431 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -440,6 +440,7 @@ static bool_t hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
 
 void hvm_do_resume(struct vcpu *v)
 {
+    struct hvm_vcpu_io *vio = &v->arch.hvm_vcpu.hvm_io;
     struct domain *d = v->domain;
     struct hvm_ioreq_server *s;
 
@@ -468,6 +469,9 @@ void hvm_do_resume(struct vcpu *v)
         }
     }
 
+    if ( vio->mmio_retry )
+        (void)handle_mmio();
+
     /* Inject pending hw/sw trap */
     if ( v->arch.hvm_vcpu.inject_trap.vector != -1 ) 
     {
diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index 625e585..02d7408 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -115,7 +115,6 @@ static const struct hvm_io_ops portio_ops = {
 static int hvm_process_io_intercept(struct hvm_io_handler *handler,
                                     ioreq_t *p)
 {
-    struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
     const struct hvm_io_ops *ops = handler->ops;
     int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
     uint64_t data;
@@ -125,23 +124,12 @@ static int hvm_process_io_intercept(struct hvm_io_handler *handler,
     {
         for ( i = 0; i < p->count; i++ )
         {
-            if ( vio->mmio_retrying )
-            {
-                if ( vio->mmio_large_read_bytes != p->size )
-                    return X86EMUL_UNHANDLEABLE;
-                memcpy(&data, vio->mmio_large_read, p->size);
-                vio->mmio_large_read_bytes = 0;
-                vio->mmio_retrying = 0;
-            }
-            else
-            {
-                addr = (p->type == IOREQ_TYPE_COPY) ?
-                    p->addr + step * i :
-                    p->addr;
-                rc = ops->read(handler, addr, p->size, &data);
-                if ( rc != X86EMUL_OKAY )
-                    break;
-            }
+            addr = (p->type == IOREQ_TYPE_COPY) ?
+                p->addr + step * i :
+                p->addr;
+            rc = ops->read(handler, addr, p->size, &data);
+            if ( rc != X86EMUL_OKAY )
+                break;
 
             if ( p->data_is_ptr )
             {
@@ -150,14 +138,12 @@ static int hvm_process_io_intercept(struct hvm_io_handler *handler,
                 {
                 case HVMCOPY_okay:
                     break;
-                case HVMCOPY_gfn_paged_out:
-                case HVMCOPY_gfn_shared:
-                    rc = X86EMUL_RETRY;
-                    break;
                 case HVMCOPY_bad_gfn_to_mfn:
                     /* Drop the write as real hardware would. */
                     continue;
                 case HVMCOPY_bad_gva_to_gfn:
+                case HVMCOPY_gfn_paged_out:
+                case HVMCOPY_gfn_shared:
                     ASSERT_UNREACHABLE();
                     /* fall through */
                 default:
@@ -170,13 +156,6 @@ static int hvm_process_io_intercept(struct hvm_io_handler *handler,
             else
                 p->data = data;
         }
-
-        if ( rc == X86EMUL_RETRY )
-        {
-            vio->mmio_retry = 1;
-            vio->mmio_large_read_bytes = p->size;
-            memcpy(vio->mmio_large_read, &data, p->size);
-        }
     }
     else /* p->dir == IOREQ_WRITE */
     {
@@ -189,14 +168,12 @@ static int hvm_process_io_intercept(struct hvm_io_handler *handler,
                 {
                 case HVMCOPY_okay:
                     break;
-                case HVMCOPY_gfn_paged_out:
-                case HVMCOPY_gfn_shared:
-                    rc = X86EMUL_RETRY;
-                    break;
                 case HVMCOPY_bad_gfn_to_mfn:
                     data = ~0;
                     break;
                 case HVMCOPY_bad_gva_to_gfn:
+                case HVMCOPY_gfn_paged_out:
+                case HVMCOPY_gfn_shared:
                     ASSERT_UNREACHABLE();
                     /* fall through */
                 default:
@@ -216,15 +193,6 @@ static int hvm_process_io_intercept(struct hvm_io_handler *handler,
             if ( rc != X86EMUL_OKAY )
                 break;
         }
-
-        if ( rc == X86EMUL_RETRY )
-            vio->mmio_retry = 1;
-    }
-
-    if ( i != 0 )
-    {
-        p->count = i;
-        rc = X86EMUL_OKAY;
     }
 
     return rc;
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index dd08416..97d78bd 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -74,7 +74,7 @@ struct hvm_vcpu_io {
      * For string instruction emulation we need to be able to signal a
      * necessary retry through other than function return codes.
      */
-    bool_t mmio_retry, mmio_retrying;
+    bool_t mmio_retry;
 
     unsigned long msix_unmask_address;
 
-- 
1.7.10.4
