From: Igor Druzhinin <igor.druzhinin@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Igor Druzhinin <igor.druzhinin@citrix.com>,
	wei.liu2@citrix.com, andrew.cooper3@citrix.com,
	paul.durrant@citrix.com, jbeulich@suse.com, roger.pau@citrix.com
Subject: [PATCH v3 1/2] x86/hvm: split all linear reads and writes at page boundary
Date: Thu, 14 Mar 2019 22:30:48 +0000
Message-ID: <1552602649-14358-1-git-send-email-igor.druzhinin@citrix.com>

Ruling out page straddling at the linear level makes it easier to
distinguish chunks that require proper handling as MMIO accesses,
rather than prematurely completing them as page-straddling memory
transactions. This doesn't change the general behavior.

Signed-off-by: Igor Druzhinin <igor.druzhinin@citrix.com>
---
Changes in v3:
* new patch in v3 to address the concern about a P2M type change combined
  with page straddling
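
A brief note on the pattern: the change simply makes both linear_read()
and linear_write() split any access that crosses a page boundary first,
so the single-page handling below the split never sees a straddling
chunk. The stand-alone sketch below illustrates that pattern; the names
linear_read_sk(), read_one_page(), PAGE_SIZE_SK and PAGE_MASK_SK are
illustrative stand-ins rather than the real Xen helpers, which dispatch
to hvm_copy_from_guest_linear() / hvmemul_linear_mmio_read() as in the
diff.

/* Minimal sketch of the split-at-page-boundary pattern (illustrative only). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE_SK 4096UL
#define PAGE_MASK_SK (~(PAGE_SIZE_SK - 1))

/* Hypothetical single-page handler: never sees a straddling access. */
static int read_one_page(unsigned long addr, unsigned int bytes, void *p_data)
{
    printf("read %u bytes at %#lx (page %#lx)\n",
           bytes, addr, addr & PAGE_MASK_SK);
    memset(p_data, 0, bytes);   /* pretend the data was fetched */
    return 0;                   /* 0 plays the role of X86EMUL_OKAY */
}

/* Split any access that crosses a page boundary, then recurse. */
static int linear_read_sk(unsigned long addr, unsigned int bytes, void *p_data)
{
    unsigned int offset = addr & ~PAGE_MASK_SK;

    if ( offset + bytes > PAGE_SIZE_SK )
    {
        unsigned int part1 = PAGE_SIZE_SK - offset;
        int rc = linear_read_sk(addr, part1, p_data);

        if ( rc == 0 )
            rc = linear_read_sk(addr + part1, bytes - part1,
                                (uint8_t *)p_data + part1);
        return rc;
    }

    /* From here on the access is guaranteed to fit in a single page. */
    return read_one_page(addr, bytes, p_data);
}

int main(void)
{
    uint8_t buf[16];

    /* 0x1ff8 + 16 bytes crosses a 4k boundary, so the read is split in two. */
    return linear_read_sk(0x1ff8, sizeof(buf), buf);
}

With the split done up front, the HVMTRANS_bad_gfn_to_mfn case in the
real code can hand the chunk straight to the MMIO path, since it can no
longer straddle a page.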
---
 xen/arch/x86/hvm/emulate.c | 72 ++++++++++++++++++++++++----------------------
 1 file changed, 38 insertions(+), 34 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 754baf6..4879ccb 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1089,12 +1089,25 @@ static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
                        uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
     pagefault_info_t pfinfo;
-    int rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
+    unsigned int offset = addr & ~PAGE_MASK;
+    int rc;
 
-    switch ( rc )
+    if ( offset + bytes > PAGE_SIZE )
     {
-        unsigned int offset, part1;
+        unsigned int part1 = PAGE_SIZE - offset;
+
+        /* Split the access at the page boundary. */
+        rc = linear_read(addr, part1, p_data, pfec, hvmemul_ctxt);
+        if ( rc == X86EMUL_OKAY )
+            rc = linear_read(addr + part1, bytes - part1, p_data + part1,
+                             pfec, hvmemul_ctxt);
+        return rc;
+    }
+
+    rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
 
+    switch ( rc )
+    {
     case HVMTRANS_okay:
         return X86EMUL_OKAY;
 
@@ -1106,20 +1119,9 @@ static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
         if ( pfec & PFEC_insn_fetch )
             return X86EMUL_UNHANDLEABLE;
 
-        offset = addr & ~PAGE_MASK;
-        if ( offset + bytes <= PAGE_SIZE )
-            return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
-                                            hvmemul_ctxt,
-                                            known_gla(addr, bytes, pfec));
-
-        /* Split the access at the page boundary. */
-        part1 = PAGE_SIZE - offset;
-        rc = linear_read(addr, part1, p_data, pfec, hvmemul_ctxt);
-        if ( rc == X86EMUL_OKAY )
-            rc = linear_read(addr + part1, bytes - part1, p_data + part1,
-                             pfec, hvmemul_ctxt);
-        return rc;
-
+        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
+                                        hvmemul_ctxt,
+                                        known_gla(addr, bytes, pfec));
     case HVMTRANS_gfn_paged_out:
     case HVMTRANS_gfn_shared:
         return X86EMUL_RETRY;
@@ -1132,12 +1134,25 @@ static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
                         uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
     pagefault_info_t pfinfo;
-    int rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
+    unsigned int offset = addr & ~PAGE_MASK;
+    int rc;
 
-    switch ( rc )
+    if ( offset + bytes > PAGE_SIZE )
     {
-        unsigned int offset, part1;
+        unsigned int part1 = PAGE_SIZE - offset;
 
+        /* Split the access at the page boundary. */
+        rc = linear_write(addr, part1, p_data, pfec, hvmemul_ctxt);
+        if ( rc == X86EMUL_OKAY )
+            rc = linear_write(addr + part1, bytes - part1, p_data + part1,
+                              pfec, hvmemul_ctxt);
+        return rc;
+    }
+
+    rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
+
+    switch ( rc )
+    {
     case HVMTRANS_okay:
         return X86EMUL_OKAY;
 
@@ -1146,20 +1161,9 @@ static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
         return X86EMUL_EXCEPTION;
 
     case HVMTRANS_bad_gfn_to_mfn:
-        offset = addr & ~PAGE_MASK;
-        if ( offset + bytes <= PAGE_SIZE )
-            return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
-                                             hvmemul_ctxt,
-                                             known_gla(addr, bytes, pfec));
-
-        /* Split the access at the page boundary. */
-        part1 = PAGE_SIZE - offset;
-        rc = linear_write(addr, part1, p_data, pfec, hvmemul_ctxt);
-        if ( rc == X86EMUL_OKAY )
-            rc = linear_write(addr + part1, bytes - part1, p_data + part1,
-                              pfec, hvmemul_ctxt);
-        return rc;
-
+        return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
+                                         hvmemul_ctxt,
+                                         known_gla(addr, bytes, pfec));
     case HVMTRANS_gfn_paged_out:
     case HVMTRANS_gfn_shared:
         return X86EMUL_RETRY;
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

Thread overview: 8+ messages
2019-03-14 22:30 Igor Druzhinin [this message]
2019-03-14 22:30 ` [PATCH v3 2/2] x86/hvm: finish IOREQs correctly on completion path Igor Druzhinin
2019-03-15  9:28   ` Paul Durrant
2019-03-15 12:27   ` Jan Beulich
2019-03-15 13:05     ` Igor Druzhinin
2019-03-15 13:16       ` Jan Beulich
2019-03-15  9:23 ` [PATCH v3 1/2] x86/hvm: split all linear reads and writes at page boundary Paul Durrant
2019-03-15 12:06 ` Jan Beulich
