From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
	Paul Durrant <paul.durrant@citrix.com>,
	Keir Fraser <keir@xen.org>, Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v4 04/17] x86/hvm: remove multiple open coded 'chunking' loops
Date: Wed, 24 Jun 2015 12:24:36 +0100
Message-ID: <1435145089-21999-5-git-send-email-paul.durrant@citrix.com>
In-Reply-To: <1435145089-21999-1-git-send-email-paul.durrant@citrix.com>

...in hvmemul_read/write()

Add hvmemul_phys_mmio_access() and hvmemul_linear_mmio_access() functions
to reduce code duplication.

NOTE: This patch also introduces a change in 'chunking' around a page
      boundary. Previously (for example) an 8-byte access starting at
      the last byte of a page would be carried out as 8 single-byte
      accesses. It will now be carried out as a single-byte access,
      followed by a 4-byte access, a 2-byte access, and then another
      single-byte access.
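
A minimal, self-contained sketch of the new chunk-size progression
(illustrative userspace C, not the Xen code itself; PAGE_SIZE and the
portable fls_u() helper below are assumptions made for the example):

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    /* Portable stand-in for Xen's fls(): 1-based index of highest set bit. */
    static unsigned int fls_u(unsigned int x)
    {
        unsigned int i = 0;

        while ( x )
        {
            x >>= 1;
            i++;
        }
        return i;
    }

    static void show_chunks(unsigned long gla, unsigned int size)
    {
        printf("%u bytes at 0x%lx:", size, gla);
        while ( size )
        {
            /* Never cross a page boundary... */
            unsigned int chunk = PAGE_SIZE - (gla & (PAGE_SIZE - 1));

            if ( chunk > size )
                chunk = size;
            /* ...use the largest power of 2 not exceeding the remainder... */
            chunk = 1u << (fls_u(chunk) - 1);
            /* ...and cap at sizeof(long), as hvmemul_do_io() requires. */
            if ( chunk > sizeof(long) )
                chunk = sizeof(long);

            printf(" %u", chunk);
            gla += chunk;
            size -= chunk;
        }
        printf("\n");
    }

    int main(void)
    {
        show_chunks(PAGE_SIZE - 1, 8); /* the NOTE's example: 1 4 2 1 */
        show_chunks(PAGE_SIZE, 8);     /* aligned, 64-bit build: 8 */
        return 0;
    }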

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Keir Fraser <keir@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
 xen/arch/x86/hvm/emulate.c |  225 +++++++++++++++++++++++---------------------
 1 file changed, 118 insertions(+), 107 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 935eab3..4d11c6c 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -540,6 +540,119 @@ static int hvmemul_virtual_to_linear(
     return X86EMUL_EXCEPTION;
 }
 
+static int hvmemul_phys_mmio_access(
+    paddr_t gpa, unsigned int size, uint8_t dir, uint8_t **buffer)
+{
+    unsigned long one_rep = 1;
+    unsigned int chunk;
+    int rc;
+
+    /* Accesses must fall within a page */
+    BUG_ON((gpa & (PAGE_SIZE - 1)) + size > PAGE_SIZE);
+
+    /*
+     * hvmemul_do_io() cannot handle non-power-of-2 accesses or
+     * accesses larger than sizeof(long), so choose the highest power
+     * of 2 not exceeding sizeof(long) as the 'chunk' size.
+     */
+    chunk = 1 << (fls(size) - 1);
+    if ( chunk > sizeof(long) )
+        chunk = sizeof(long);
+
+    for ( ;; )
+    {
+        rc = hvmemul_do_mmio_buffer(gpa, &one_rep, chunk, dir, 0,
+                                    *buffer);
+        if ( rc != X86EMUL_OKAY )
+            break;
+
+        /* Advance to the next chunk */
+        gpa += chunk;
+        *buffer += chunk;
+        size -= chunk;
+
+        if ( size == 0 )
+            break;
+
+        /*
+         * If the chunk now exceeds the remaining size, choose the next
+         * lowest power of 2 that will fit.
+         */
+        while ( chunk > size )
+            chunk >>= 1;
+    }
+
+    return rc;
+}
+
+static int hvmemul_linear_mmio_access(
+    unsigned long gla, unsigned int size, uint8_t dir, uint8_t *buffer,
+    uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt, bool_t translate)
+{
+    struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+    unsigned long page_off = gla & (PAGE_SIZE - 1);
+    unsigned int chunk;
+    paddr_t gpa;
+    unsigned long one_rep = 1;
+    int rc;
+
+    chunk = min_t(unsigned int, size, PAGE_SIZE - page_off);
+
+    if ( translate )
+        gpa = pfn_to_paddr(vio->mmio_gpfn) | page_off;
+    else
+    {
+        rc = hvmemul_linear_to_phys(gla, &gpa, chunk, &one_rep, pfec,
+                                    hvmemul_ctxt);
+        if ( rc != X86EMUL_OKAY )
+            return rc;
+    }
+
+    for ( ;; )
+    {
+        rc = hvmemul_phys_mmio_access(gpa, chunk, dir, &buffer);
+        if ( rc != X86EMUL_OKAY )
+            break;
+
+        gla += chunk;
+        gpa += chunk;
+        size -= chunk;
+
+        if ( size == 0 )
+            break;
+
+        ASSERT((gla & (PAGE_SIZE - 1)) == 0);
+        chunk = min_t(unsigned int, size, PAGE_SIZE);
+        if ( !translate )
+        {
+            rc = hvmemul_linear_to_phys(gla, &gpa, chunk, &one_rep, pfec,
+                                        hvmemul_ctxt);
+            if ( rc != X86EMUL_OKAY )
+                return rc;
+        }
+    }
+
+    return rc;
+}
+
+static inline int hvmemul_linear_mmio_read(
+    unsigned long gla, unsigned int size, void *buffer,
+    uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt,
+    bool_t translate)
+{
+    return hvmemul_linear_mmio_access(gla, size, IOREQ_READ, buffer,
+                                      pfec, hvmemul_ctxt, translate);
+}
+
+static inline int hvmemul_linear_mmio_write(
+    unsigned long gla, unsigned int size, void *buffer,
+    uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt,
+    bool_t translate)
+{
+    return hvmemul_linear_mmio_access(gla, size, IOREQ_WRITE, buffer,
+                                      pfec, hvmemul_ctxt, translate);
+}
+
 static int __hvmemul_read(
     enum x86_segment seg,
     unsigned long offset,
@@ -550,51 +663,19 @@ static int __hvmemul_read(
 {
     struct vcpu *curr = current;
     unsigned long addr, reps = 1;
-    unsigned int off, chunk = min(bytes, 1U << LONG_BYTEORDER);
     uint32_t pfec = PFEC_page_present;
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
-    paddr_t gpa;
     int rc;
 
     rc = hvmemul_virtual_to_linear(
         seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
     if ( rc != X86EMUL_OKAY )
         return rc;
-    off = addr & (PAGE_SIZE - 1);
-    /*
-     * We only need to handle sizes actual instruction operands can have. All
-     * such sizes are either powers of 2 or the sum of two powers of 2. Thus
-     * picking as initial chunk size the largest power of 2 not greater than
-     * the total size will always result in only power-of-2 size requests
-     * issued to hvmemul_do_mmio() (hvmemul_do_io() rejects non-powers-of-2).
-     */
-    while ( chunk & (chunk - 1) )
-        chunk &= chunk - 1;
-    if ( off + bytes > PAGE_SIZE )
-        while ( off & (chunk - 1) )
-            chunk >>= 1;
-
     if ( ((access_type != hvm_access_insn_fetch
            ? vio->mmio_access.read_access
            : vio->mmio_access.insn_fetch)) &&
          (vio->mmio_gva == (addr & PAGE_MASK)) )
-    {
-        gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
-        while ( (off + chunk) <= PAGE_SIZE )
-        {
-            rc = hvmemul_do_mmio_buffer(gpa, &reps, chunk, IOREQ_READ, 0,
-                                        p_data);
-            if ( rc != X86EMUL_OKAY || bytes == chunk )
-                return rc;
-            addr += chunk;
-            off += chunk;
-            gpa += chunk;
-            p_data += chunk;
-            bytes -= chunk;
-            if ( bytes < chunk )
-                chunk = bytes;
-        }
-    }
+        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
 
     if ( (seg != x86_seg_none) &&
          (hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) )
@@ -613,30 +694,8 @@ static int __hvmemul_read(
     case HVMCOPY_bad_gfn_to_mfn:
         if ( access_type == hvm_access_insn_fetch )
             return X86EMUL_UNHANDLEABLE;
-        rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
-                                    hvmemul_ctxt);
-        while ( rc == X86EMUL_OKAY )
-        {
-            rc = hvmemul_do_mmio_buffer(gpa, &reps, chunk, IOREQ_READ, 0,
-                                        p_data);
-            if ( rc != X86EMUL_OKAY || bytes == chunk )
-                break;
-            addr += chunk;
-            off += chunk;
-            p_data += chunk;
-            bytes -= chunk;
-            if ( bytes < chunk )
-                chunk = bytes;
-            if ( off < PAGE_SIZE )
-                gpa += chunk;
-            else
-            {
-                rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
-                                            hvmemul_ctxt);
-                off = 0;
-            }
-        }
-        return rc;
+
+        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
     case HVMCOPY_gfn_paged_out:
     case HVMCOPY_gfn_shared:
         return X86EMUL_RETRY;
@@ -702,43 +761,18 @@ static int hvmemul_write(
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     struct vcpu *curr = current;
     unsigned long addr, reps = 1;
-    unsigned int off, chunk = min(bytes, 1U << LONG_BYTEORDER);
     uint32_t pfec = PFEC_page_present | PFEC_write_access;
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
-    paddr_t gpa;
     int rc;
 
     rc = hvmemul_virtual_to_linear(
         seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
     if ( rc != X86EMUL_OKAY )
         return rc;
-    off = addr & (PAGE_SIZE - 1);
-    /* See the respective comment in __hvmemul_read(). */
-    while ( chunk & (chunk - 1) )
-        chunk &= chunk - 1;
-    if ( off + bytes > PAGE_SIZE )
-        while ( off & (chunk - 1) )
-            chunk >>= 1;
 
     if ( vio->mmio_access.write_access &&
          (vio->mmio_gva == (addr & PAGE_MASK)) )
-    {
-        gpa = (((paddr_t)vio->mmio_gpfn << PAGE_SHIFT) | off);
-        while ( (off + chunk) <= PAGE_SIZE )
-        {
-            rc = hvmemul_do_mmio_buffer(gpa, &reps, chunk, IOREQ_WRITE, 0,
-                                        p_data);
-            if ( rc != X86EMUL_OKAY || bytes == chunk )
-                return rc;
-            addr += chunk;
-            off += chunk;
-            gpa += chunk;
-            p_data += chunk;
-            bytes -= chunk;
-            if ( bytes < chunk )
-                chunk = bytes;
-        }
-    }
+        return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
 
     if ( (seg != x86_seg_none) &&
          (hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3) )
@@ -753,30 +787,7 @@ static int hvmemul_write(
     case HVMCOPY_bad_gva_to_gfn:
         return X86EMUL_EXCEPTION;
     case HVMCOPY_bad_gfn_to_mfn:
-        rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
-                                    hvmemul_ctxt);
-        while ( rc == X86EMUL_OKAY )
-        {
-            rc = hvmemul_do_mmio_buffer(gpa, &reps, chunk, IOREQ_WRITE, 0,
-                                        p_data);
-            if ( rc != X86EMUL_OKAY || bytes == chunk )
-                break;
-            addr += chunk;
-            off += chunk;
-            p_data += chunk;
-            bytes -= chunk;
-            if ( bytes < chunk )
-                chunk = bytes;
-            if ( off < PAGE_SIZE )
-                gpa += chunk;
-            else
-            {
-                rc = hvmemul_linear_to_phys(addr, &gpa, chunk, &reps, pfec,
-                                            hvmemul_ctxt);
-                off = 0;
-            }
-        }
-        return rc;
+        return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
     case HVMCOPY_gfn_paged_out:
     case HVMCOPY_gfn_shared:
         return X86EMUL_RETRY;
-- 
1.7.10.4

