From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
	Paul Durrant <paul.durrant@citrix.com>,
	Keir Fraser <keir@xen.org>, Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v4 17/17] x86/hvm: track large memory mapped accesses by buffer offset
Date: Wed, 24 Jun 2015 12:24:49 +0100
Message-ID: <1435145089-21999-18-git-send-email-paul.durrant@citrix.com>
In-Reply-To: <1435145089-21999-1-git-send-email-paul.durrant@citrix.com>

The code in hvmemul_do_io() that tracks large reads or writes, to avoid
re-issuing component I/O, is defeated by accesses that cross a page
boundary, because it tracks by physical address. The code is also only
relevant to memory mapped I/O to or from a buffer.
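
For illustration, a minimal standalone C sketch (not Xen code; the field
names merely mirror the pre-patch hvm_vcpu_io fields and the addresses are
invented) of how a write straddling a page boundary defeats the old
physical-address contiguity check:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t paddr_t;

    /* Old-style tracking state, modelled on the pre-patch fields. */
    static paddr_t      mmio_large_write_pa;
    static unsigned int mmio_large_write_bytes;

    /* Old check: is this chunk already covered by an earlier cycle? */
    static int already_done(paddr_t addr, unsigned int size)
    {
        return (addr >= mmio_large_write_pa) &&
               ((addr + size) <= (mmio_large_write_pa + mmio_large_write_bytes));
    }

    int main(void)
    {
        /*
         * A 16-byte write whose linear address straddles a page boundary is
         * split into two 8-byte chunks.  The two linear pages map to
         * non-adjacent guest-physical frames, so the second chunk is not at
         * pa + bytes and is never recognised as part of the same access.
         */
        paddr_t chunk1 = 0x12ff8;   /* last 8 bytes of one page */
        paddr_t chunk2 = 0x87000;   /* first 8 bytes of an unrelated page */

        mmio_large_write_pa = chunk1;
        mmio_large_write_bytes = 8;

        printf("chunk2 already done? %d\n", already_done(chunk2, 8)); /* 0 */
        return 0;
    }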

This patch re-factors the code and moves it into hvmemul_phys_mmio_access(),
where it is relevant, and tracks by buffer offset rather than physical
address.
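
As a rough standalone model of the new scheme (illustrative only; the real
code lives in hvmemul_phys_mmio_access() below and keeps one cache per ioreq
direction), a cache indexed by buffer offset lets a retried emulation replay
chunks it has already transferred, regardless of where their guest-physical
addresses fall:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * Minimal model of the new tracking scheme: progress is recorded by
     * offset into the emulation buffer, so it is immune to page-boundary
     * splits.  Only the read direction is shown for brevity.
     */
    struct mmio_cache {
        unsigned int size;          /* bytes of the buffer already done */
        uint8_t      buffer[32];
    };

    static struct mmio_cache cache;

    /* Perform, or replay from the cache, one read chunk at a buffer offset. */
    static void read_chunk(uint8_t *buffer, unsigned int off,
                           unsigned int chunk, const uint8_t *device)
    {
        if ( (off + chunk) <= cache.size )
        {
            /* Already done in an earlier emulation pass: replay it. */
            memcpy(&buffer[off], &cache.buffer[off], chunk);
            return;
        }

        /* New chunk: issue the (simulated) device read, then record it. */
        memcpy(&buffer[off], &device[off], chunk);
        memcpy(&cache.buffer[off], &buffer[off], chunk);
        cache.size += chunk;
    }

    int main(void)
    {
        const uint8_t device[16] = "MMIO TEST DATA!";
        uint8_t buffer[16] = { 0 };

        /* First pass covers offsets 0-7; a retry re-walks from offset 0 and
         * replays the first chunk before issuing the second. */
        read_chunk(buffer, 0, 8, device);
        read_chunk(buffer, 0, 8, device);   /* replayed, no device access */
        read_chunk(buffer, 8, 8, device);

        printf("%.15s\n", (const char *)buffer);
        return 0;
    }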

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Keir Fraser <keir@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
 xen/arch/x86/hvm/emulate.c     |   98 ++++++++++++++++------------------------
 xen/include/asm-x86/hvm/vcpu.h |   16 ++++---
 2 files changed, 48 insertions(+), 66 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index aa68787..4424dfc 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -107,29 +107,6 @@ static int hvmemul_do_io(
         return X86EMUL_UNHANDLEABLE;
     }
 
-    if ( is_mmio && !data_is_addr )
-    {
-        /* Part of a multi-cycle read or write? */
-        if ( dir == IOREQ_WRITE )
-        {
-            paddr_t pa = vio->mmio_large_write_pa;
-            unsigned int bytes = vio->mmio_large_write_bytes;
-            if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
-                return X86EMUL_OKAY;
-        }
-        else
-        {
-            paddr_t pa = vio->mmio_large_read_pa;
-            unsigned int bytes = vio->mmio_large_read_bytes;
-            if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
-            {
-                memcpy(p_data, &vio->mmio_large_read[addr - pa],
-                       size);
-                return X86EMUL_OKAY;
-            }
-        }
-    }
-
     switch ( vio->io_req.state )
     {
     case STATE_IOREQ_NONE:
@@ -209,33 +186,6 @@ static int hvmemul_do_io(
             memcpy(p_data, &p.data, size);
     }
 
-    if ( is_mmio && !data_is_addr )
-    {
-        /* Part of a multi-cycle read or write? */
-        if ( dir == IOREQ_WRITE )
-        {
-            paddr_t pa = vio->mmio_large_write_pa;
-            unsigned int bytes = vio->mmio_large_write_bytes;
-            if ( bytes == 0 )
-                pa = vio->mmio_large_write_pa = addr;
-            if ( addr == (pa + bytes) )
-                vio->mmio_large_write_bytes += size;
-        }
-        else
-        {
-            paddr_t pa = vio->mmio_large_read_pa;
-            unsigned int bytes = vio->mmio_large_read_bytes;
-            if ( bytes == 0 )
-                pa = vio->mmio_large_read_pa = addr;
-            if ( (addr == (pa + bytes)) &&
-                 ((bytes + size) <= sizeof(vio->mmio_large_read)) )
-            {
-                memcpy(&vio->mmio_large_read[bytes], p_data, size);
-                vio->mmio_large_read_bytes += size;
-            }
-        }
-    }
-
     return X86EMUL_OKAY;
 }
 
@@ -601,8 +551,11 @@ static int hvmemul_virtual_to_linear(
 }
 
 static int hvmemul_phys_mmio_access(
-    paddr_t gpa, unsigned int size, uint8_t dir, uint8_t **buffer)
+    paddr_t gpa, unsigned int size, uint8_t dir, uint8_t *buffer,
+    unsigned int *off)
 {
+    struct vcpu *curr = current;
+    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     unsigned long one_rep = 1;
     unsigned int chunk;
     int rc;
@@ -621,14 +574,41 @@ static int hvmemul_phys_mmio_access(
 
     for ( ;; )
     {
-        rc = hvmemul_do_mmio_buffer(gpa, &one_rep, chunk, dir, 0,
-                                    *buffer);
-        if ( rc != X86EMUL_OKAY )
-            break;
+        /* Have we already done this chunk? */
+        if ( (*off + chunk) <= vio->mmio_cache[dir].size )
+        {
+            ASSERT(*off + chunk <= vio->mmio_cache[dir].size);
+
+            if ( dir == IOREQ_READ )
+                memcpy(&buffer[*off],
+                       &vio->mmio_cache[IOREQ_READ].buffer[*off],
+                       chunk);
+            else
+            {
+                if ( memcmp(&buffer[*off],
+                            &vio->mmio_cache[IOREQ_WRITE].buffer[*off],
+                            chunk) != 0 )
+                    domain_crash(curr->domain);
+            }
+        }
+        else
+        {
+            ASSERT(*off == vio->mmio_cache[dir].size);
+
+            rc = hvmemul_do_mmio_buffer(gpa, &one_rep, chunk, dir, 0,
+                                        &buffer[*off]);
+            if ( rc != X86EMUL_OKAY )
+                break;
+
+            /* Note that we have now done this chunk */
+            memcpy(&vio->mmio_cache[dir].buffer[*off],
+                   &buffer[*off], chunk);
+            vio->mmio_cache[dir].size += chunk;
+        }
 
         /* Advance to the next chunk */
         gpa += chunk;
-        *buffer += chunk;
+        *off += chunk;
         size -= chunk;
 
         if ( size == 0 )
@@ -651,7 +631,7 @@ static int hvmemul_linear_mmio_access(
 {
     struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
     unsigned long page_off = gla & (PAGE_SIZE - 1);
-    unsigned int chunk;
+    unsigned int chunk, buffer_off = 0;
     paddr_t gpa;
     unsigned long one_rep = 1;
     int rc;
@@ -670,7 +650,7 @@ static int hvmemul_linear_mmio_access(
 
     for ( ;; )
     {
-        rc = hvmemul_phys_mmio_access(gpa, chunk, dir, &buffer);
+        rc = hvmemul_phys_mmio_access(gpa, chunk, dir, buffer, &buffer_off);
         if ( rc != X86EMUL_OKAY )
             break;
 
@@ -1625,7 +1605,7 @@ static int _hvm_emulate_one(struct hvm_emulate_ctxt *hvmemul_ctxt,
         rc = X86EMUL_RETRY;
     if ( rc != X86EMUL_RETRY )
     {
-        vio->mmio_large_read_bytes = vio->mmio_large_write_bytes = 0;
+        memset(&vio->mmio_cache, 0, sizeof(vio->mmio_cache));
         vio->mmio_insn_bytes = 0;
     }
     else
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 008c8fa..4f41c83 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -61,13 +61,15 @@ struct hvm_vcpu_io {
     unsigned long       mmio_gva;
     unsigned long       mmio_gpfn;
 
-    /* We may read up to m256 as a number of device-model transactions. */
-    paddr_t mmio_large_read_pa;
-    uint8_t mmio_large_read[32];
-    unsigned int mmio_large_read_bytes;
-    /* We may write up to m256 as a number of device-model transactions. */
-    unsigned int mmio_large_write_bytes;
-    paddr_t mmio_large_write_pa;
+    /*
+     * We may read or write up to m256 as a number of device-model
+     * transactions.
+     */
+    struct {
+        unsigned long size;
+        uint8_t buffer[32];
+    } mmio_cache[2]; /* Indexed by ioreq type */
+
     /* For retries we shouldn't re-fetch the instruction. */
     unsigned int mmio_insn_bytes;
     unsigned char mmio_insn[16];
-- 
1.7.10.4

Thread overview: 72+ messages
2015-06-24 11:24 [PATCH v4 00/17] x86/hvm: I/O emulation cleanup and fix Paul Durrant
2015-06-24 11:24 ` [PATCH v4 01/17] x86/hvm: simplify hvmemul_do_io() Paul Durrant
2015-06-24 11:24 ` [PATCH v4 02/17] x86/hvm: remove hvm_io_pending() check in hvmemul_do_io() Paul Durrant
2015-06-24 11:24 ` [PATCH v4 03/17] x86/hvm: remove extraneous parameter from hvmtrace_io_assist() Paul Durrant
2015-06-24 11:24 ` [PATCH v4 04/17] x86/hvm: remove multiple open coded 'chunking' loops Paul Durrant
2015-06-24 12:33   ` Jan Beulich
2015-06-24 12:59     ` Paul Durrant
2015-06-24 13:09       ` Jan Beulich
2015-06-24 11:24 ` [PATCH v4 05/17] x86/hvm: re-name struct hvm_mmio_handler to hvm_mmio_ops Paul Durrant
2015-06-24 11:24 ` [PATCH v4 06/17] x86/hvm: unify internal portio and mmio intercepts Paul Durrant
2015-06-24 11:24 ` [PATCH v4 07/17] x86/hvm: add length to mmio check op Paul Durrant
2015-06-24 13:08   ` Jan Beulich
2015-06-24 13:14     ` Paul Durrant
2015-06-24 13:25       ` Jan Beulich
2015-06-24 13:34         ` Paul Durrant
2015-06-24 14:01           ` Jan Beulich
2015-06-25 12:21   ` Andrew Cooper
2015-06-25 12:46     ` Jan Beulich
2015-06-25 13:08       ` Paul Durrant
2015-06-25 13:29         ` Jan Beulich
2015-06-25 13:30           ` Paul Durrant
2015-06-25 13:34       ` Andrew Cooper
2015-06-25 13:36         ` Paul Durrant
2015-06-25 13:37           ` Andrew Cooper
2015-06-25 13:38             ` Paul Durrant
2015-06-25 13:46               ` Andrew Cooper
2015-06-25 13:48                 ` Paul Durrant
2015-06-25 13:47           ` Jan Beulich
2015-06-25 13:52             ` Paul Durrant
2015-06-25 14:46               ` Jan Beulich
2015-06-25 14:57                 ` Paul Durrant
2015-06-24 11:24 ` [PATCH v4 08/17] x86/hvm: unify dpci portio intercept with standard portio intercept Paul Durrant
2015-06-24 13:46   ` Jan Beulich
2015-06-24 11:24 ` [PATCH v4 09/17] x86/hvm: unify stdvga mmio intercept with standard mmio intercept Paul Durrant
2015-06-24 13:59   ` Jan Beulich
2015-06-24 14:12     ` Paul Durrant
2015-06-24 14:30       ` Jan Beulich
2015-06-24 14:43         ` Paul Durrant
2015-06-24 11:24 ` [PATCH v4 10/17] x86/hvm: revert 82ed8716b "fix direct PCI port I/O emulation retry Paul Durrant
2015-06-24 15:21   ` Jan Beulich
2015-06-24 15:23     ` Paul Durrant
2015-06-24 11:24 ` [PATCH v4 11/17] x86/hvm: only call hvm_io_assist() from hvm_wait_for_io() Paul Durrant
2015-06-24 15:36   ` Jan Beulich
2015-06-24 15:50     ` Paul Durrant
2015-06-24 11:24 ` [PATCH v4 12/17] x86/hvm: split I/O completion handling from state model Paul Durrant
2015-06-25  9:40   ` Jan Beulich
2015-06-25 15:59     ` Paul Durrant
2015-06-24 11:24 ` [PATCH v4 13/17] x86/hvm: remove HVMIO_dispatched I/O state Paul Durrant
2015-06-24 15:52   ` Jan Beulich
2015-06-24 16:00     ` Paul Durrant
2015-06-24 16:12       ` Jan Beulich
2015-06-24 17:00         ` Paul Durrant
2015-06-25 12:29   ` Andrew Cooper
2015-06-24 11:24 ` [PATCH v4 14/17] x86/hvm: remove hvm_io_state enumeration Paul Durrant
2015-06-25  9:43   ` Jan Beulich
2015-06-25  9:46     ` Paul Durrant
2015-06-24 11:24 ` [PATCH v4 15/17] x86/hvm: use ioreq_t to track in-flight state Paul Durrant
2015-06-25  9:51   ` Jan Beulich
2015-06-25 10:17     ` Paul Durrant
2015-06-24 11:24 ` [PATCH v4 16/17] x86/hvm: always re-emulate I/O from a buffer Paul Durrant
2015-06-25  9:57   ` Jan Beulich
2015-06-25 10:32     ` Paul Durrant
2015-06-25 10:50       ` Jan Beulich
2015-06-25 10:52         ` Paul Durrant
2015-06-24 11:24 ` Paul Durrant [this message]
2015-06-25 10:46   ` [PATCH v4 17/17] x86/hvm: track large memory mapped accesses by buffer offset Jan Beulich
2015-06-25 10:51     ` Paul Durrant
2015-06-25 11:05       ` Jan Beulich
2015-06-25 10:55     ` Paul Durrant
2015-06-25 11:08       ` Jan Beulich
2015-06-24 12:16 ` [PATCH v4 00/17] x86/hvm: I/O emulation cleanup and fix Jan Beulich
2015-06-24 12:52   ` Paul Durrant
