* [PATCH v3 0/3] Various XSA followups
@ 2017-09-19 14:14 Alexandru Isaila
  2017-09-19 14:14 ` [PATCH v3 1/3] x86/hvm: Rename enum hvm_copy_result to hvm_translation_result Alexandru Isaila
                   ` (2 more replies)
  0 siblings, 3 replies; 12+ messages in thread
From: Alexandru Isaila @ 2017-09-19 14:14 UTC (permalink / raw)
  To: xen-devel
  Cc: jun.nakajima, kevin.tian, sstabellini, wei.liu2,
	suravee.suthikulpanit, george.dunlap, andrew.cooper3, tim,
	paul.durrant, jbeulich, boris.ostrovsky, ian.jackson

XSA-219 was discovered while trying to implement the bugfix in patch 3.

Andrew Cooper (3):
 [RFC] x86/hvm: Rename enum hvm_copy_result to hvm_translation_result
 x86/hvm: Break out __hvm_copy()'s translation logic
 x86/hvm: Implement hvmemul_write() using real mappings

Alexandru Isaila (2):
 x86/hvm: Break out __hvm_copy()'s translation logic
 x86/hvm: Implement hvmemul_write() using real mappings

---
Changelog: I did not address the comments that are still under debate





^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH v3 1/3] x86/hvm: Rename enum hvm_copy_result to hvm_translation_result
  2017-09-19 14:14 [PATCH v3 0/3] Various XSA followups Alexandru Isaila
@ 2017-09-19 14:14 ` Alexandru Isaila
  2017-09-19 14:16   ` Paul Durrant
  2017-09-19 14:39   ` Jan Beulich
  2017-09-19 14:14 ` [PATCH v3 2/3] x86/hvm: Break out __hvm_copy()'s translation logic Alexandru Isaila
  2017-09-19 14:14 ` [PATCH v3 3/3] x86/hvm: Implement hvmemul_write() using real mappings Alexandru Isaila
  2 siblings, 2 replies; 12+ messages in thread
From: Alexandru Isaila @ 2017-09-19 14:14 UTC (permalink / raw)
  To: xen-devel
  Cc: jun.nakajima, kevin.tian, sstabellini, wei.liu2,
	suravee.suthikulpanit, george.dunlap, andrew.cooper3, tim,
	paul.durrant, jbeulich, boris.ostrovsky, ian.jackson

From: Andrew Cooper <andrew.cooper3@citrix.com>

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

---
Acked-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Acked-by: George Dunlap <george.dunlap@citrix.com>
---
 xen/arch/x86/hvm/dom0_build.c     |  2 +-
 xen/arch/x86/hvm/emulate.c        | 40 ++++++++++++++--------------
 xen/arch/x86/hvm/hvm.c            | 56 +++++++++++++++++++--------------------
 xen/arch/x86/hvm/intercept.c      | 20 +++++++-------
 xen/arch/x86/hvm/svm/nestedsvm.c  |  5 ++--
 xen/arch/x86/hvm/svm/svm.c        |  2 +-
 xen/arch/x86/hvm/viridian.c       |  2 +-
 xen/arch/x86/hvm/vmsi.c           |  2 +-
 xen/arch/x86/hvm/vmx/realmode.c   |  2 +-
 xen/arch/x86/hvm/vmx/vvmx.c       | 14 +++++-----
 xen/arch/x86/mm/shadow/common.c   | 12 ++++-----
 xen/common/libelf/libelf-loader.c |  4 +--
 xen/include/asm-x86/hvm/support.h | 40 ++++++++++++++--------------
 13 files changed, 101 insertions(+), 100 deletions(-)

diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index 020c355..e8f746c 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -238,7 +238,7 @@ static int __init pvh_setup_vmx_realmode_helpers(struct domain *d)
     if ( !pvh_steal_ram(d, HVM_VM86_TSS_SIZE, 128, GB(4), &gaddr) )
     {
         if ( hvm_copy_to_guest_phys(gaddr, NULL, HVM_VM86_TSS_SIZE, v) !=
-             HVMCOPY_okay )
+             HVMTRANS_okay )
             printk("Unable to zero VM86 TSS area\n");
         d->arch.hvm_domain.params[HVM_PARAM_VM86_TSS_SIZED] =
             VM86_TSS_UPDATED | ((uint64_t)HVM_VM86_TSS_SIZE << 32) | gaddr;
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 54811c1..cc874ce 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -100,7 +100,7 @@ static int ioreq_server_read(const struct hvm_io_handler *io_handler,
                     uint32_t size,
                     uint64_t *data)
 {
-    if ( hvm_copy_from_guest_phys(data, addr, size) != HVMCOPY_okay )
+    if ( hvm_copy_from_guest_phys(data, addr, size) != HVMTRANS_okay )
         return X86EMUL_UNHANDLEABLE;
 
     return X86EMUL_OKAY;
@@ -893,18 +893,18 @@ static int __hvmemul_read(
 
     switch ( rc )
     {
-    case HVMCOPY_okay:
+    case HVMTRANS_okay:
         break;
-    case HVMCOPY_bad_gva_to_gfn:
+    case HVMTRANS_bad_linear_to_gfn:
         x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
         return X86EMUL_EXCEPTION;
-    case HVMCOPY_bad_gfn_to_mfn:
+    case HVMTRANS_bad_gfn_to_mfn:
         if ( access_type == hvm_access_insn_fetch )
             return X86EMUL_UNHANDLEABLE;
 
         return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
-    case HVMCOPY_gfn_paged_out:
-    case HVMCOPY_gfn_shared:
+    case HVMTRANS_gfn_paged_out:
+    case HVMTRANS_gfn_shared:
         return X86EMUL_RETRY;
     default:
         return X86EMUL_UNHANDLEABLE;
@@ -1012,15 +1012,15 @@ static int hvmemul_write(
 
     switch ( rc )
     {
-    case HVMCOPY_okay:
+    case HVMTRANS_okay:
         break;
-    case HVMCOPY_bad_gva_to_gfn:
+    case HVMTRANS_bad_linear_to_gfn:
         x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
         return X86EMUL_EXCEPTION;
-    case HVMCOPY_bad_gfn_to_mfn:
+    case HVMTRANS_bad_gfn_to_mfn:
         return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
-    case HVMCOPY_gfn_paged_out:
-    case HVMCOPY_gfn_shared:
+    case HVMTRANS_gfn_paged_out:
+    case HVMTRANS_gfn_shared:
         return X86EMUL_RETRY;
     default:
         return X86EMUL_UNHANDLEABLE;
@@ -1384,7 +1384,7 @@ static int hvmemul_rep_movs(
             return rc;
         }
 
-        rc = HVMCOPY_okay;
+        rc = HVMTRANS_okay;
     }
     else
         /*
@@ -1394,16 +1394,16 @@ static int hvmemul_rep_movs(
          */
         rc = hvm_copy_from_guest_phys(buf, sgpa, bytes);
 
-    if ( rc == HVMCOPY_okay )
+    if ( rc == HVMTRANS_okay )
         rc = hvm_copy_to_guest_phys(dgpa, buf, bytes, current);
 
     xfree(buf);
 
-    if ( rc == HVMCOPY_gfn_paged_out )
+    if ( rc == HVMTRANS_gfn_paged_out )
         return X86EMUL_RETRY;
-    if ( rc == HVMCOPY_gfn_shared )
+    if ( rc == HVMTRANS_gfn_shared )
         return X86EMUL_RETRY;
-    if ( rc != HVMCOPY_okay )
+    if ( rc != HVMTRANS_okay )
     {
         gdprintk(XENLOG_WARNING, "Failed memory-to-memory REP MOVS: sgpa=%"
                  PRIpaddr" dgpa=%"PRIpaddr" reps=%lu bytes_per_rep=%u\n",
@@ -1513,10 +1513,10 @@ static int hvmemul_rep_stos(
 
         switch ( rc )
         {
-        case HVMCOPY_gfn_paged_out:
-        case HVMCOPY_gfn_shared:
+        case HVMTRANS_gfn_paged_out:
+        case HVMTRANS_gfn_shared:
             return X86EMUL_RETRY;
-        case HVMCOPY_okay:
+        case HVMTRANS_okay:
             return X86EMUL_OKAY;
         }
 
@@ -2172,7 +2172,7 @@ void hvm_emulate_init_per_insn(
                                         &addr) &&
              hvm_fetch_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
                                          sizeof(hvmemul_ctxt->insn_buf),
-                                         pfec, NULL) == HVMCOPY_okay) ?
+                                         pfec, NULL) == HVMTRANS_okay) ?
             sizeof(hvmemul_ctxt->insn_buf) : 0;
     }
     else
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 6cb903d..488acbf 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2915,9 +2915,9 @@ void hvm_task_switch(
 
     rc = hvm_copy_from_guest_linear(
         &tss, prev_tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
-    if ( rc == HVMCOPY_bad_gva_to_gfn )
+    if ( rc == HVMTRANS_bad_linear_to_gfn )
         hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
-    if ( rc != HVMCOPY_okay )
+    if ( rc != HVMTRANS_okay )
         goto out;
 
     eflags = regs->eflags;
@@ -2955,20 +2955,20 @@ void hvm_task_switch(
                                   offsetof(typeof(tss), trace) -
                                   offsetof(typeof(tss), eip),
                                   PFEC_page_present, &pfinfo);
-    if ( rc == HVMCOPY_bad_gva_to_gfn )
+    if ( rc == HVMTRANS_bad_linear_to_gfn )
         hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
-    if ( rc != HVMCOPY_okay )
+    if ( rc != HVMTRANS_okay )
         goto out;
 
     rc = hvm_copy_from_guest_linear(
         &tss, tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
-    if ( rc == HVMCOPY_bad_gva_to_gfn )
+    if ( rc == HVMTRANS_bad_linear_to_gfn )
         hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
     /*
-     * Note: The HVMCOPY_gfn_shared case could be optimised, if the callee
+     * Note: The HVMTRANS_gfn_shared case could be optimised, if the callee
      * functions knew we want RO access.
      */
-    if ( rc != HVMCOPY_okay )
+    if ( rc != HVMTRANS_okay )
         goto out;
 
     new_cpl = tss.eflags & X86_EFLAGS_VM ? 3 : tss.cs & 3;
@@ -3010,12 +3010,12 @@ void hvm_task_switch(
         rc = hvm_copy_to_guest_linear(tr.base + offsetof(typeof(tss), back_link),
                                       &tss.back_link, sizeof(tss.back_link), 0,
                                       &pfinfo);
-        if ( rc == HVMCOPY_bad_gva_to_gfn )
+        if ( rc == HVMTRANS_bad_linear_to_gfn )
         {
             hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
             exn_raised = 1;
         }
-        else if ( rc != HVMCOPY_okay )
+        else if ( rc != HVMTRANS_okay )
             goto out;
     }
 
@@ -3051,12 +3051,12 @@ void hvm_task_switch(
         {
             rc = hvm_copy_to_guest_linear(linear_addr, &errcode, opsz, 0,
                                           &pfinfo);
-            if ( rc == HVMCOPY_bad_gva_to_gfn )
+            if ( rc == HVMTRANS_bad_linear_to_gfn )
             {
                 hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
                 exn_raised = 1;
             }
-            else if ( rc != HVMCOPY_okay )
+            else if ( rc != HVMTRANS_okay )
                 goto out;
         }
     }
@@ -3073,7 +3073,7 @@ void hvm_task_switch(
 #define HVMCOPY_to_guest   (1u<<0)
 #define HVMCOPY_phys       (0u<<2)
 #define HVMCOPY_linear     (1u<<2)
-static enum hvm_copy_result __hvm_copy(
+static enum hvm_translation_result __hvm_copy(
     void *buf, paddr_t addr, int size, struct vcpu *v, unsigned int flags,
     uint32_t pfec, pagefault_info_t *pfinfo)
 {
@@ -3098,7 +3098,7 @@ static enum hvm_copy_result __hvm_copy(
      * Hence we bail immediately if called from atomic context.
      */
     if ( in_atomic() )
-        return HVMCOPY_unhandleable;
+        return HVMTRANS_unhandleable;
 #endif
 
     while ( todo > 0 )
@@ -3113,15 +3113,15 @@ static enum hvm_copy_result __hvm_copy(
             if ( gfn == gfn_x(INVALID_GFN) )
             {
                 if ( pfec & PFEC_page_paged )
-                    return HVMCOPY_gfn_paged_out;
+                    return HVMTRANS_gfn_paged_out;
                 if ( pfec & PFEC_page_shared )
-                    return HVMCOPY_gfn_shared;
+                    return HVMTRANS_gfn_shared;
                 if ( pfinfo )
                 {
                     pfinfo->linear = addr;
                     pfinfo->ec = pfec & ~PFEC_implicit;
                 }
-                return HVMCOPY_bad_gva_to_gfn;
+                return HVMTRANS_bad_linear_to_gfn;
             }
             gpa |= (paddr_t)gfn << PAGE_SHIFT;
         }
@@ -3139,28 +3139,28 @@ static enum hvm_copy_result __hvm_copy(
         if ( v == current
              && !nestedhvm_vcpu_in_guestmode(v)
              && hvm_mmio_internal(gpa) )
-            return HVMCOPY_bad_gfn_to_mfn;
+            return HVMTRANS_bad_gfn_to_mfn;
 
         page = get_page_from_gfn(v->domain, gfn, &p2mt, P2M_UNSHARE);
 
         if ( !page )
-            return HVMCOPY_bad_gfn_to_mfn;
+            return HVMTRANS_bad_gfn_to_mfn;
 
         if ( p2m_is_paging(p2mt) )
         {
             put_page(page);
             p2m_mem_paging_populate(v->domain, gfn);
-            return HVMCOPY_gfn_paged_out;
+            return HVMTRANS_gfn_paged_out;
         }
         if ( p2m_is_shared(p2mt) )
         {
             put_page(page);
-            return HVMCOPY_gfn_shared;
+            return HVMTRANS_gfn_shared;
         }
         if ( p2m_is_grant(p2mt) )
         {
             put_page(page);
-            return HVMCOPY_unhandleable;
+            return HVMTRANS_unhandleable;
         }
 
         p = (char *)__map_domain_page(page) + (addr & ~PAGE_MASK);
@@ -3198,24 +3198,24 @@ static enum hvm_copy_result __hvm_copy(
         put_page(page);
     }
 
-    return HVMCOPY_okay;
+    return HVMTRANS_okay;
 }
 
-enum hvm_copy_result hvm_copy_to_guest_phys(
+enum hvm_translation_result hvm_copy_to_guest_phys(
     paddr_t paddr, void *buf, int size, struct vcpu *v)
 {
     return __hvm_copy(buf, paddr, size, v,
                       HVMCOPY_to_guest | HVMCOPY_phys, 0, NULL);
 }
 
-enum hvm_copy_result hvm_copy_from_guest_phys(
+enum hvm_translation_result hvm_copy_from_guest_phys(
     void *buf, paddr_t paddr, int size)
 {
     return __hvm_copy(buf, paddr, size, current,
                       HVMCOPY_from_guest | HVMCOPY_phys, 0, NULL);
 }
 
-enum hvm_copy_result hvm_copy_to_guest_linear(
+enum hvm_translation_result hvm_copy_to_guest_linear(
     unsigned long addr, void *buf, int size, uint32_t pfec,
     pagefault_info_t *pfinfo)
 {
@@ -3224,7 +3224,7 @@ enum hvm_copy_result hvm_copy_to_guest_linear(
                       PFEC_page_present | PFEC_write_access | pfec, pfinfo);
 }
 
-enum hvm_copy_result hvm_copy_from_guest_linear(
+enum hvm_translation_result hvm_copy_from_guest_linear(
     void *buf, unsigned long addr, int size, uint32_t pfec,
     pagefault_info_t *pfinfo)
 {
@@ -3233,7 +3233,7 @@ enum hvm_copy_result hvm_copy_from_guest_linear(
                       PFEC_page_present | pfec, pfinfo);
 }
 
-enum hvm_copy_result hvm_fetch_from_guest_linear(
+enum hvm_translation_result hvm_fetch_from_guest_linear(
     void *buf, unsigned long addr, int size, uint32_t pfec,
     pagefault_info_t *pfinfo)
 {
@@ -3670,7 +3670,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
                                         sizeof(sig), hvm_access_insn_fetch,
                                         cs, &addr) &&
              (hvm_fetch_from_guest_linear(sig, addr, sizeof(sig),
-                                          walk, NULL) == HVMCOPY_okay) &&
+                                          walk, NULL) == HVMTRANS_okay) &&
              (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
         {
             regs->rip += sizeof(sig);
diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index e51efd5..ef82419 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -136,14 +136,14 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
                 switch ( hvm_copy_to_guest_phys(p->data + step * i,
                                                 &data, p->size, current) )
                 {
-                case HVMCOPY_okay:
+                case HVMTRANS_okay:
                     break;
-                case HVMCOPY_bad_gfn_to_mfn:
+                case HVMTRANS_bad_gfn_to_mfn:
                     /* Drop the write as real hardware would. */
                     continue;
-                case HVMCOPY_bad_gva_to_gfn:
-                case HVMCOPY_gfn_paged_out:
-                case HVMCOPY_gfn_shared:
+                case HVMTRANS_bad_linear_to_gfn:
+                case HVMTRANS_gfn_paged_out:
+                case HVMTRANS_gfn_shared:
                     ASSERT_UNREACHABLE();
                     /* fall through */
                 default:
@@ -164,14 +164,14 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
                 switch ( hvm_copy_from_guest_phys(&data, p->data + step * i,
                                                   p->size) )
                 {
-                case HVMCOPY_okay:
+                case HVMTRANS_okay:
                     break;
-                case HVMCOPY_bad_gfn_to_mfn:
+                case HVMTRANS_bad_gfn_to_mfn:
                     data = ~0;
                     break;
-                case HVMCOPY_bad_gva_to_gfn:
-                case HVMCOPY_gfn_paged_out:
-                case HVMCOPY_gfn_shared:
+                case HVMTRANS_bad_linear_to_gfn:
+                case HVMTRANS_gfn_paged_out:
+                case HVMTRANS_gfn_shared:
                     ASSERT_UNREACHABLE();
                     /* fall through */
                 default:
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 8fd9c23..66a1777 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -357,7 +357,7 @@ static int nsvm_vmrun_permissionmap(struct vcpu *v, bool_t viopm)
     struct vmcb_struct *host_vmcb = arch_svm->vmcb;
     unsigned long *ns_msrpm_ptr;
     unsigned int i;
-    enum hvm_copy_result ret;
+    enum hvm_translation_result ret;
     unsigned long *ns_viomap;
     bool_t ioport_80 = 1, ioport_ed = 1;
 
@@ -365,7 +365,8 @@ static int nsvm_vmrun_permissionmap(struct vcpu *v, bool_t viopm)
 
     ret = hvm_copy_from_guest_phys(svm->ns_cached_msrpm,
                                    ns_vmcb->_msrpm_base_pa, MSRPM_SIZE);
-    if (ret != HVMCOPY_okay) {
+    if ( ret != HVMTRANS_okay )
+    {
         gdprintk(XENLOG_ERR, "hvm_copy_from_guest_phys msrpm %u\n", ret);
         return 1;
     }
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 6b19b16..12ddc8a 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1266,7 +1266,7 @@ static void svm_emul_swint_injection(struct x86_event *event)
                                     PFEC_implicit, &pfinfo);
     if ( rc )
     {
-        if ( rc == HVMCOPY_bad_gva_to_gfn )
+        if ( rc == HVMTRANS_bad_linear_to_gfn )
         {
             fault = TRAP_page_fault;
             ec = pfinfo.ec;
diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
index e0546f3..f0fa59d 100644
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -914,7 +914,7 @@ int viridian_hypercall(struct cpu_user_regs *regs)
 
         /* Get input parameters. */
         if ( hvm_copy_from_guest_phys(&input_params, input_params_gpa,
-                                      sizeof(input_params)) != HVMCOPY_okay )
+                                      sizeof(input_params)) != HVMTRANS_okay )
             break;
 
         /*
diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
index 9b35e9b..7126de7 100644
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -609,7 +609,7 @@ void msix_write_completion(struct vcpu *v)
         if ( desc &&
              hvm_copy_from_guest_phys(&data,
                                       v->arch.hvm_vcpu.hvm_io.msix_snoop_gpa,
-                                      sizeof(data)) == HVMCOPY_okay &&
+                                      sizeof(data)) == HVMTRANS_okay &&
              !(data & PCI_MSIX_VECTOR_BITMASK) )
             ctrl_address = snoop_addr;
     }
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index 11bde58..12d43ad 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -40,7 +40,7 @@ static void realmode_deliver_exception(
     last_byte = (vector * 4) + 3;
     if ( idtr->limit < last_byte ||
          hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4) !=
-         HVMCOPY_okay )
+         HVMTRANS_okay )
     {
         /* Software interrupt? */
         if ( insn_len != 0 )
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index e2361a1..cd0ee0a 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -481,9 +481,9 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
             int rc = hvm_copy_from_guest_linear(poperandS, base, size,
                                                 0, &pfinfo);
 
-            if ( rc == HVMCOPY_bad_gva_to_gfn )
+            if ( rc == HVMTRANS_bad_linear_to_gfn )
                 hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
-            if ( rc != HVMCOPY_okay )
+            if ( rc != HVMTRANS_okay )
                 return X86EMUL_EXCEPTION;
         }
         decode->mem = base;
@@ -1468,7 +1468,7 @@ int nvmx_handle_vmxon(struct cpu_user_regs *regs)
     }
 
     rc = hvm_copy_from_guest_phys(&nvmcs_revid, gpa, sizeof(nvmcs_revid));
-    if ( rc != HVMCOPY_okay ||
+    if ( rc != HVMTRANS_okay ||
          (nvmcs_revid & ~VMX_BASIC_REVISION_MASK) ||
          ((nvmcs_revid ^ vmx_basic_msr) & VMX_BASIC_REVISION_MASK) )
     {
@@ -1746,9 +1746,9 @@ int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
     gpa = nvcpu->nv_vvmcxaddr;
 
     rc = hvm_copy_to_guest_linear(decode.mem, &gpa, decode.len, 0, &pfinfo);
-    if ( rc == HVMCOPY_bad_gva_to_gfn )
+    if ( rc == HVMTRANS_bad_linear_to_gfn )
         hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
-    if ( rc != HVMCOPY_okay )
+    if ( rc != HVMTRANS_okay )
         return X86EMUL_EXCEPTION;
 
     vmsucceed(regs);
@@ -1835,9 +1835,9 @@ int nvmx_handle_vmread(struct cpu_user_regs *regs)
     switch ( decode.type ) {
     case VMX_INST_MEMREG_TYPE_MEMORY:
         rc = hvm_copy_to_guest_linear(decode.mem, &value, decode.len, 0, &pfinfo);
-        if ( rc == HVMCOPY_bad_gva_to_gfn )
+        if ( rc == HVMTRANS_bad_linear_to_gfn )
             hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
-        if ( rc != HVMCOPY_okay )
+        if ( rc != HVMTRANS_okay )
             return X86EMUL_EXCEPTION;
         break;
     case VMX_INST_MEMREG_TYPE_REG:
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 3926ed6..8b9310c 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -196,16 +196,16 @@ hvm_read(enum x86_segment seg,
 
     switch ( rc )
     {
-    case HVMCOPY_okay:
+    case HVMTRANS_okay:
         return X86EMUL_OKAY;
-    case HVMCOPY_bad_gva_to_gfn:
+    case HVMTRANS_bad_linear_to_gfn:
         x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &sh_ctxt->ctxt);
         return X86EMUL_EXCEPTION;
-    case HVMCOPY_bad_gfn_to_mfn:
-    case HVMCOPY_unhandleable:
+    case HVMTRANS_bad_gfn_to_mfn:
+    case HVMTRANS_unhandleable:
         return X86EMUL_UNHANDLEABLE;
-    case HVMCOPY_gfn_paged_out:
-    case HVMCOPY_gfn_shared:
+    case HVMTRANS_gfn_paged_out:
+    case HVMTRANS_gfn_shared:
         return X86EMUL_RETRY;
     }
 
diff --git a/xen/common/libelf/libelf-loader.c b/xen/common/libelf/libelf-loader.c
index c8b7ec9..0f46872 100644
--- a/xen/common/libelf/libelf-loader.c
+++ b/xen/common/libelf/libelf-loader.c
@@ -154,10 +154,10 @@ static elf_errorstatus elf_memcpy(struct vcpu *v, void *dst, void *src,
 #ifdef CONFIG_X86
     if ( is_hvm_vcpu(v) )
     {
-        enum hvm_copy_result rc;
+        enum hvm_translation_result rc;
 
         rc = hvm_copy_to_guest_phys((paddr_t)dst, src, size, v);
-        return rc != HVMCOPY_okay ? -1 : 0;
+        return rc != HVMTRANS_okay ? -1 : 0;
     }
 #endif
 
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index b18dbb6..e3b035d 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -53,23 +53,23 @@ extern unsigned int opt_hvm_debug_level;
 
 extern unsigned long hvm_io_bitmap[];
 
-enum hvm_copy_result {
-    HVMCOPY_okay = 0,
-    HVMCOPY_bad_gva_to_gfn,
-    HVMCOPY_bad_gfn_to_mfn,
-    HVMCOPY_unhandleable,
-    HVMCOPY_gfn_paged_out,
-    HVMCOPY_gfn_shared,
+enum hvm_translation_result {
+    HVMTRANS_okay,
+    HVMTRANS_bad_linear_to_gfn,
+    HVMTRANS_bad_gfn_to_mfn,
+    HVMTRANS_unhandleable,
+    HVMTRANS_gfn_paged_out,
+    HVMTRANS_gfn_shared,
 };
 
 /*
  * Copy to/from a guest physical address.
- * Returns HVMCOPY_okay, else HVMCOPY_bad_gfn_to_mfn if the given physical
+ * Returns HVMTRANS_okay, else HVMTRANS_bad_gfn_to_mfn if the given physical
  * address range does not map entirely onto ordinary machine memory.
  */
-enum hvm_copy_result hvm_copy_to_guest_phys(
+enum hvm_translation_result hvm_copy_to_guest_phys(
     paddr_t paddr, void *buf, int size, struct vcpu *v);
-enum hvm_copy_result hvm_copy_from_guest_phys(
+enum hvm_translation_result hvm_copy_from_guest_phys(
     void *buf, paddr_t paddr, int size);
 
 /*
@@ -79,13 +79,13 @@ enum hvm_copy_result hvm_copy_from_guest_phys(
  * to set them.
  * 
  * Returns:
- *  HVMCOPY_okay: Copy was entirely successful.
- *  HVMCOPY_bad_gfn_to_mfn: Some guest physical address did not map to
- *                          ordinary machine memory.
- *  HVMCOPY_bad_gva_to_gfn: Some guest virtual address did not have a valid
- *                          mapping to a guest physical address.  The
- *                          pagefault_info_t structure will be filled in if
- *                          provided.
+ *  HVMTRANS_okay: Copy was entirely successful.
+ *  HVMTRANS_bad_gfn_to_mfn: Some guest physical address did not map to
+ *                           ordinary machine memory.
+ *  HVMTRANS_bad_linear_to_gfn: Some guest linear address did not have a
+ *                              valid mapping to a guest physical address.
+ *                              The pagefault_info_t structure will be filled
+ *                              in if provided.
  */
 typedef struct pagefault_info
 {
@@ -93,13 +93,13 @@ typedef struct pagefault_info
     int ec;
 } pagefault_info_t;
 
-enum hvm_copy_result hvm_copy_to_guest_linear(
+enum hvm_translation_result hvm_copy_to_guest_linear(
     unsigned long addr, void *buf, int size, uint32_t pfec,
     pagefault_info_t *pfinfo);
-enum hvm_copy_result hvm_copy_from_guest_linear(
+enum hvm_translation_result hvm_copy_from_guest_linear(
     void *buf, unsigned long addr, int size, uint32_t pfec,
     pagefault_info_t *pfinfo);
-enum hvm_copy_result hvm_fetch_from_guest_linear(
+enum hvm_translation_result hvm_fetch_from_guest_linear(
     void *buf, unsigned long addr, int size, uint32_t pfec,
     pagefault_info_t *pfinfo);
 
-- 
2.7.4
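
For readers skimming the rename: the new enum names slot into the same caller
pattern as before.  A minimal, purely illustrative sketch (modelled on the
hvm_read() hunk in xen/arch/x86/mm/shadow/common.c above; the function below
is not part of the patch):

    /* Illustrative only: mapping hvm_translation_result onto emulator codes. */
    static int example_translation_to_x86emul(enum hvm_translation_result res)
    {
        switch ( res )
        {
        case HVMTRANS_okay:              return X86EMUL_OKAY;
        case HVMTRANS_bad_linear_to_gfn: return X86EMUL_EXCEPTION; /* after raising #PF */
        case HVMTRANS_bad_gfn_to_mfn:
        case HVMTRANS_unhandleable:      return X86EMUL_UNHANDLEABLE;
        case HVMTRANS_gfn_paged_out:
        case HVMTRANS_gfn_shared:        return X86EMUL_RETRY;
        }

        return X86EMUL_UNHANDLEABLE;
    }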



^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH v3 2/3] x86/hvm: Break out __hvm_copy()'s translation logic
  2017-09-19 14:14 [PATCH v3 0/3] Various XSA followups Alexandru Isaila
  2017-09-19 14:14 ` [PATCH v3 1/3] x86/hvm: Rename enum hvm_copy_result to hvm_translation_result Alexandru Isaila
@ 2017-09-19 14:14 ` Alexandru Isaila
  2017-09-19 14:27   ` Paul Durrant
  2017-09-19 14:14 ` [PATCH v3 3/3] x86/hvm: Implement hvmemul_write() using real mappings Alexandru Isaila
  2 siblings, 1 reply; 12+ messages in thread
From: Alexandru Isaila @ 2017-09-19 14:14 UTC (permalink / raw)
  To: xen-devel
  Cc: jun.nakajima, kevin.tian, sstabellini, wei.liu2,
	suravee.suthikulpanit, george.dunlap, andrew.cooper3, tim,
	paul.durrant, jbeulich, Alexandru Isaila, boris.ostrovsky,
	ian.jackson

From: Andrew Cooper <andrew.cooper3@citrix.com>

It will be reused by later changes.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

---
Changes since V2:
	- Changed _gfn() to gaddr_to_gfn
	- Changed gfn_x to gfn_to_gaddr
---
 xen/arch/x86/hvm/hvm.c            | 144 +++++++++++++++++++++++---------------
 xen/include/asm-x86/hvm/support.h |  12 ++++
 2 files changed, 98 insertions(+), 58 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 488acbf..93394c1 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3069,6 +3069,83 @@ void hvm_task_switch(
     hvm_unmap_entry(nptss_desc);
 }
 
+enum hvm_translation_result hvm_translate_get_page(
+    struct vcpu *v, unsigned long addr, bool linear, uint32_t pfec,
+    pagefault_info_t *pfinfo, struct page_info **page_p,
+    gfn_t *gfn_p, p2m_type_t *p2mt_p)
+{
+    struct page_info *page;
+    p2m_type_t p2mt;
+    gfn_t gfn;
+
+    if ( linear )
+    {
+        gfn = _gfn(paging_gva_to_gfn(v, addr, &pfec));
+
+        if ( gfn_eq(gfn, INVALID_GFN) )
+        {
+            if ( pfec & PFEC_page_paged )
+                return HVMTRANS_gfn_paged_out;
+
+            if ( pfec & PFEC_page_shared )
+                return HVMTRANS_gfn_shared;
+
+            if ( pfinfo )
+            {
+                pfinfo->linear = addr;
+                pfinfo->ec = pfec & ~PFEC_implicit;
+            }
+
+            return HVMTRANS_bad_linear_to_gfn;
+        }
+    }
+    else
+    {
+        gfn = gaddr_to_gfn(addr);
+        ASSERT(!pfinfo);
+    }
+
+    /*
+     * No need to do the P2M lookup for internally handled MMIO, benefiting
+     * - 32-bit WinXP (& older Windows) on AMD CPUs for LAPIC accesses,
+     * - newer Windows (like Server 2012) for HPET accesses.
+     */
+    if ( v == current
+         && !nestedhvm_vcpu_in_guestmode(v)
+         && hvm_mmio_internal(gfn_to_gaddr(gfn)) )
+        return HVMTRANS_bad_gfn_to_mfn;
+
+    page = get_page_from_gfn(v->domain, gfn_x(gfn), &p2mt, P2M_UNSHARE);
+
+    if ( !page )
+        return HVMTRANS_bad_gfn_to_mfn;
+
+    if ( p2m_is_paging(p2mt) )
+    {
+        put_page(page);
+        p2m_mem_paging_populate(v->domain, gfn_x(gfn));
+        return HVMTRANS_gfn_paged_out;
+    }
+    if ( p2m_is_shared(p2mt) )
+    {
+        put_page(page);
+        return HVMTRANS_gfn_shared;
+    }
+    if ( p2m_is_grant(p2mt) )
+    {
+        put_page(page);
+        return HVMTRANS_unhandleable;
+    }
+
+    *page_p = page;
+    if ( gfn_p )
+        *gfn_p = gfn;
+    if ( p2mt_p )
+        *p2mt_p = p2mt;
+
+    return HVMTRANS_okay;
+}
+
 #define HVMCOPY_from_guest (0u<<0)
 #define HVMCOPY_to_guest   (1u<<0)
 #define HVMCOPY_phys       (0u<<2)
@@ -3077,7 +3154,7 @@ static enum hvm_translation_result __hvm_copy(
     void *buf, paddr_t addr, int size, struct vcpu *v, unsigned int flags,
     uint32_t pfec, pagefault_info_t *pfinfo)
 {
-    unsigned long gfn;
+    gfn_t gfn;
     struct page_info *page;
     p2m_type_t p2mt;
     char *p;
@@ -3103,65 +3180,15 @@ static enum hvm_translation_result __hvm_copy(
 
     while ( todo > 0 )
     {
+        enum hvm_translation_result res;
         paddr_t gpa = addr & ~PAGE_MASK;
 
         count = min_t(int, PAGE_SIZE - gpa, todo);
 
-        if ( flags & HVMCOPY_linear )
-        {
-            gfn = paging_gva_to_gfn(v, addr, &pfec);
-            if ( gfn == gfn_x(INVALID_GFN) )
-            {
-                if ( pfec & PFEC_page_paged )
-                    return HVMTRANS_gfn_paged_out;
-                if ( pfec & PFEC_page_shared )
-                    return HVMTRANS_gfn_shared;
-                if ( pfinfo )
-                {
-                    pfinfo->linear = addr;
-                    pfinfo->ec = pfec & ~PFEC_implicit;
-                }
-                return HVMTRANS_bad_linear_to_gfn;
-            }
-            gpa |= (paddr_t)gfn << PAGE_SHIFT;
-        }
-        else
-        {
-            gfn = addr >> PAGE_SHIFT;
-            gpa = addr;
-        }
-
-        /*
-         * No need to do the P2M lookup for internally handled MMIO, benefiting
-         * - 32-bit WinXP (& older Windows) on AMD CPUs for LAPIC accesses,
-         * - newer Windows (like Server 2012) for HPET accesses.
-         */
-        if ( v == current
-             && !nestedhvm_vcpu_in_guestmode(v)
-             && hvm_mmio_internal(gpa) )
-            return HVMTRANS_bad_gfn_to_mfn;
-
-        page = get_page_from_gfn(v->domain, gfn, &p2mt, P2M_UNSHARE);
-
-        if ( !page )
-            return HVMTRANS_bad_gfn_to_mfn;
-
-        if ( p2m_is_paging(p2mt) )
-        {
-            put_page(page);
-            p2m_mem_paging_populate(v->domain, gfn);
-            return HVMTRANS_gfn_paged_out;
-        }
-        if ( p2m_is_shared(p2mt) )
-        {
-            put_page(page);
-            return HVMTRANS_gfn_shared;
-        }
-        if ( p2m_is_grant(p2mt) )
-        {
-            put_page(page);
-            return HVMTRANS_unhandleable;
-        }
+        res = hvm_translate_get_page(v, addr, flags & HVMCOPY_linear,
+                                     pfec, pfinfo, &page, &gfn, &p2mt);
+        if ( res != HVMTRANS_okay )
+            return res;
 
         p = (char *)__map_domain_page(page) + (addr & ~PAGE_MASK);
 
@@ -3170,10 +3197,11 @@ static enum hvm_translation_result __hvm_copy(
             if ( p2m_is_discard_write(p2mt) )
             {
                 static unsigned long lastpage;
-                if ( xchg(&lastpage, gfn) != gfn )
+
+                if ( xchg(&lastpage, gfn_x(gfn)) != gfn_x(gfn) )
                     dprintk(XENLOG_G_DEBUG,
                             "%pv attempted write to read-only gfn %#lx (mfn=%#lx)\n",
-                            v, gfn, page_to_mfn(page));
+                            v, gfn_x(gfn), page_to_mfn(page));
             }
             else
             {
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index e3b035d..d784fc1 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -24,6 +24,7 @@
 #include <xen/sched.h>
 #include <asm/hvm/save.h>
 #include <asm/processor.h>
+#include <asm/p2m.h>
 
 #ifndef NDEBUG
 #define DBG_LEVEL_0                 (1 << 0)
@@ -103,6 +104,17 @@ enum hvm_translation_result hvm_fetch_from_guest_linear(
     void *buf, unsigned long addr, int size, uint32_t pfec,
     pagefault_info_t *pfinfo);
 
+/*
+ * Get a reference on the page under an HVM physical or linear address.  If
+ * linear, a pagewalk is performed using pfec (fault details optionally in
+ * pfinfo).
+ * On success, returns HVMTRANS_okay with a reference taken on **page_p.
+ */
+enum hvm_translation_result hvm_translate_get_page(
+    struct vcpu *v, unsigned long addr, bool linear, uint32_t pfec,
+    pagefault_info_t *pfinfo, struct page_info **page_p,
+    gfn_t *gfn_p, p2m_type_t *p2mt_p);
+
 #define HVM_HCALL_completed  0 /* hypercall completed - no further action */
 #define HVM_HCALL_preempted  1 /* hypercall preempted - re-execute VMCALL */
 int hvm_hypercall(struct cpu_user_regs *regs);
-- 
2.7.4
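
A minimal, illustrative sketch of how the new hvm_translate_get_page() helper
is intended to be used (modelled on the reworked __hvm_copy() loop above; the
wrapper function and its name are hypothetical, not part of the patch):

    /* Hypothetical example: read one byte behind a guest linear address. */
    static int example_peek_guest_byte(struct vcpu *v, unsigned long addr,
                                       uint32_t pfec, uint8_t *out)
    {
        pagefault_info_t pfinfo;
        struct page_info *page;
        enum hvm_translation_result res;
        uint8_t *p;

        /* gfn_p and p2mt_p are optional and may be NULL. */
        res = hvm_translate_get_page(v, addr, true /* linear */, pfec,
                                     &pfinfo, &page, NULL, NULL);
        if ( res != HVMTRANS_okay )
            return -1; /* caller decides: inject #PF, retry, or give up */

        /* A page reference is held on success: map, read, unmap, release. */
        p = __map_domain_page(page);
        *out = p[addr & ~PAGE_MASK];
        unmap_domain_page(p);
        put_page(page);

        return 0;
    }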



^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH v3 3/3] x86/hvm: Implement hvmemul_write() using real mappings
  2017-09-19 14:14 [PATCH v3 0/3] Various XSA followups Alexandru Isaila
  2017-09-19 14:14 ` [PATCH v3 1/3] x86/hvm: Rename enum hvm_copy_result to hvm_translation_result Alexandru Isaila
  2017-09-19 14:14 ` [PATCH v3 2/3] x86/hvm: Break out __hvm_copy()'s translation logic Alexandru Isaila
@ 2017-09-19 14:14 ` Alexandru Isaila
  2017-09-19 14:38   ` Jan Beulich
  2017-09-19 14:39   ` Paul Durrant
  2 siblings, 2 replies; 12+ messages in thread
From: Alexandru Isaila @ 2017-09-19 14:14 UTC (permalink / raw)
  To: xen-devel
  Cc: jun.nakajima, kevin.tian, sstabellini, wei.liu2,
	suravee.suthikulpanit, george.dunlap, andrew.cooper3, tim,
	paul.durrant, jbeulich, Alexandru Isaila, boris.ostrovsky,
	ian.jackson

From: Andrew Cooper <andrew.cooper3@citrix.com>

An access which crosses a page boundary is performed atomically by x86
hardware, albeit with a severe performance penalty.  An important corner case
is when a straddled access hits two pages which differ in whether a
translation exists, or in net access rights.

The use of hvm_copy*() in hvmemul_write() is problematic, because it performs
a translation then completes the partial write, before moving onto the next
translation.

If an individual emulated write straddles two pages, the first of which is
writable, and the second of which is not, the first half of the write will
complete before #PF is raised from the second half.

This results in guest state corruption as a side effect of emulation, which
has been observed to cause Windows to crash while under introspection.

Introduce the hvmemul_{,un}map_linear_addr() helpers, which translate the
entire contents of a linear access, and vmap() the underlying frames to
provide a contiguous virtual mapping for the emulator to use.  This is the
same mechanism as used by the shadow emulation code.

This will catch any translation issues and abort the emulation before any
modifications occur.
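
In outline, the rework replaces the copy-then-fault behaviour with a single
map/modify/unmap sequence, so every page of the access is translated and
checked before any byte reaches the guest.  A condensed, illustrative view of
the resulting hvmemul_write() flow (the real hunk is below; the NULL/MMIO
branch shown here follows hvmemul_map_linear_addr()'s documented contract
rather than this exact diff):

    /* Condensed illustration of the new write path. */
    mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
    if ( IS_ERR(mapping) )             /* #PF raised, retry, or unhandleable */
        return ~PTR_ERR(mapping);      /* decode back into an X86EMUL_* code */
    if ( !mapping )                    /* MMIO, per the helper's comment     */
        return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
                                         hvmemul_ctxt, 0);

    memcpy(mapping, p_data, bytes);    /* all pages already validated        */
    hvmemul_unmap_linear_addr(mapping, addr, bytes, hvmemul_ctxt);
    return X86EMUL_OKAY;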

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

---
Changes since V2:
	- Added linear & ~PAGE_MASK to return statement
	- Modified mfn - hvmemul_ctxt->mfn to final - first + 1
	- Remove useless else statement
---
 xen/arch/x86/hvm/emulate.c        | 177 ++++++++++++++++++++++++++++++++++----
 xen/include/asm-x86/hvm/emulate.h |   7 ++
 2 files changed, 167 insertions(+), 17 deletions(-)

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index cc874ce..5574698 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -498,6 +498,159 @@ static int hvmemul_do_mmio_addr(paddr_t mmio_gpa,
 }
 
 /*
+ * Map the frame(s) covering an individual linear access, for writeable
+ * access.  May return NULL for MMIO, or ERR_PTR(~X86EMUL_*) for other errors
+ * including ERR_PTR(~X86EMUL_OKAY) for write-discard mappings.
+ *
+ * In debug builds, map() checks that each slot in hvmemul_ctxt->mfn[] is
+ * clean before use, and poisons unused slots with INVALID_MFN.
+ */
+static void *hvmemul_map_linear_addr(
+    unsigned long linear, unsigned int bytes, uint32_t pfec,
+    struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    struct vcpu *curr = current;
+    void *err, *mapping;
+
+    /* First and final gfns which need mapping. */
+    unsigned long frame = linear >> PAGE_SHIFT, first = frame;
+    unsigned long final = (linear + bytes - !!bytes) >> PAGE_SHIFT;
+
+    /*
+     * mfn points to the next free slot.  All used slots have a page reference
+     * held on them.
+     */
+    mfn_t *mfn = &hvmemul_ctxt->mfn[0];
+
+    /*
+     * The caller has no legitimate reason for trying a zero-byte write, but
+     * final is calculated to fail safe in release builds.
+     *
+     * The maximum write size depends on the number of adjacent mfns[] which
+     * can be vmap()'d, accounting for possible misalignment within the region.
+     * The higher level emulation callers are responsible for ensuring that
+     * mfns[] is large enough for the requested write size.
+     */
+    if ( bytes == 0 ||
+         final - first > ARRAY_SIZE(hvmemul_ctxt->mfn) - 1 )
+    {
+        ASSERT_UNREACHABLE();
+        goto unhandleable;
+    }
+
+    do {
+        enum hvm_translation_result res;
+        struct page_info *page;
+        pagefault_info_t pfinfo;
+        p2m_type_t p2mt;
+
+        /* Error checking.  Confirm that the current slot is clean. */
+        ASSERT(mfn_x(*mfn) == 0);
+
+        res = hvm_translate_get_page(curr, frame << PAGE_SHIFT, true, pfec,
+                                     &pfinfo, &page, NULL, &p2mt);
+
+        switch ( res )
+        {
+        case HVMTRANS_okay:
+            break;
+
+        case HVMTRANS_bad_linear_to_gfn:
+            x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
+            err = ERR_PTR(~(long)X86EMUL_EXCEPTION);
+            goto out;
+
+        case HVMTRANS_bad_gfn_to_mfn:
+            err = NULL;
+            goto out;
+
+        case HVMTRANS_gfn_paged_out:
+        case HVMTRANS_gfn_shared:
+            err = ERR_PTR(~(long)X86EMUL_RETRY);
+            goto out;
+
+        default:
+            goto unhandleable;
+        }
+
+        *mfn++ = _mfn(page_to_mfn(page));
+        frame++;
+
+        if ( p2m_is_discard_write(p2mt) )
+        {
+            err = ERR_PTR(~(long)X86EMUL_OKAY);
+            goto out;
+        }
+
+    } while ( frame < final );
+
+    /* Entire access within a single frame? */
+    if ( first == final )
+        mapping = map_domain_page(hvmemul_ctxt->mfn[0]);
+    /* Multiple frames? Need to vmap(). */
+    else if ( (mapping = vmap(hvmemul_ctxt->mfn,
+                              final - first + 1)) == NULL )
+        goto unhandleable;
+
+#ifndef NDEBUG /* Poison unused mfn[]s with INVALID_MFN. */
+    while ( mfn < hvmemul_ctxt->mfn + ARRAY_SIZE(hvmemul_ctxt->mfn) )
+    {
+        ASSERT(mfn_x(*mfn) == 0);
+        *mfn++ = INVALID_MFN;
+    }
+#endif
+
+    return mapping + (linear & ~PAGE_MASK);
+
+ unhandleable:
+    err = ERR_PTR(~(long)X86EMUL_UNHANDLEABLE);
+
+ out:
+    /* Drop all held references. */
+    while ( mfn-- > hvmemul_ctxt->mfn )
+        put_page(mfn_to_page(mfn_x(*mfn)));
+
+    return err;
+}
+
+static void hvmemul_unmap_linear_addr(
+    void *mapping, unsigned long linear, unsigned int bytes,
+    struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    struct domain *currd = current->domain;
+    unsigned long frame = linear >> PAGE_SHIFT;
+    unsigned long final = (linear + bytes - !!bytes) >> PAGE_SHIFT;
+    mfn_t *mfn = &hvmemul_ctxt->mfn[0];
+
+    ASSERT(bytes > 0);
+
+    if ( frame == final )
+        unmap_domain_page(mapping);
+    else
+        vunmap(mapping);
+
+    do
+    {
+        ASSERT(mfn_valid(*mfn));
+        paging_mark_dirty(currd, *mfn);
+        put_page(mfn_to_page(mfn_x(*mfn)));
+
+        frame++;
+        *mfn++ = _mfn(0); /* Clean slot for map()'s error checking. */
+
+    } while ( frame < final );
+
+
+#ifndef NDEBUG /* Check (and clean) all unused mfns. */
+    while ( mfn < hvmemul_ctxt->mfn + ARRAY_SIZE(hvmemul_ctxt->mfn) )
+    {
+        ASSERT(mfn_eq(*mfn, INVALID_MFN));
+        *mfn++ = _mfn(0);
+    }
+#endif
+}
+
+/*
  * Convert addr from linear to physical form, valid over the range
  * [addr, addr + *reps * bytes_per_rep]. *reps is adjusted according to
  * the valid computed range. It is always >0 when X86EMUL_OKAY is returned.
@@ -988,11 +1141,11 @@ static int hvmemul_write(
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     struct vcpu *curr = current;
-    pagefault_info_t pfinfo;
     unsigned long addr, reps = 1;
     uint32_t pfec = PFEC_page_present | PFEC_write_access;
     struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
     int rc;
+    void *mapping;
 
     if ( is_x86_system_segment(seg) )
         pfec |= PFEC_implicit;
@@ -1008,23 +1161,13 @@ static int hvmemul_write(
          (vio->mmio_gla == (addr & PAGE_MASK)) )
         return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
 
-    rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
+    mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
+    if ( IS_ERR(mapping) )
+        return ~PTR_ERR(mapping);
 
-    switch ( rc )
-    {
-    case HVMTRANS_okay:
-        break;
-    case HVMTRANS_bad_linear_to_gfn:
-        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
-        return X86EMUL_EXCEPTION;
-    case HVMTRANS_bad_gfn_to_mfn:
-        return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
-    case HVMTRANS_gfn_paged_out:
-    case HVMTRANS_gfn_shared:
-        return X86EMUL_RETRY;
-    default:
-        return X86EMUL_UNHANDLEABLE;
-    }
+    memcpy(mapping, p_data, bytes);
+
+    hvmemul_unmap_linear_addr(mapping, addr, bytes, hvmemul_ctxt);
 
     return X86EMUL_OKAY;
 }
diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h
index 8864775..d379a4a 100644
--- a/xen/include/asm-x86/hvm/emulate.h
+++ b/xen/include/asm-x86/hvm/emulate.h
@@ -37,6 +37,13 @@ struct hvm_emulate_ctxt {
     unsigned long seg_reg_accessed;
     unsigned long seg_reg_dirty;
 
+    /*
+     * MFNs behind temporary mappings in the write callback.  The length is
+     * arbitrary, and can be increased if writes longer than PAGE_SIZE+1 are
+     * needed.
+     */
+    mfn_t mfn[2];
+
     uint32_t intr_shadow;
 
     bool_t set_context;
-- 
2.7.4
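
As a worked check of the bound enforced in hvmemul_map_linear_addr() against
the two-entry mfn[] array added above (assuming 4 KiB pages; the values are
illustrative):

    /* Illustrative arithmetic only, assuming PAGE_SHIFT == 12. */
    unsigned long linear = 0x1fff;                              /* last byte of a page   */
    unsigned int  bytes  = 2;                                   /* straddles into next   */
    unsigned long first  = linear >> PAGE_SHIFT;                /* 1 */
    unsigned long final  = (linear + bytes - 1) >> PAGE_SHIFT;  /* 2 */

    /*
     * final - first == 1 <= ARRAY_SIZE(mfn) - 1, so the access is accepted.
     * Any write of up to PAGE_SIZE + 1 bytes touches at most two frames and
     * therefore fits the mfn[2] array; longer writes would need it enlarged.
     */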



^ permalink raw reply related	[flat|nested] 12+ messages in thread

* Re: [PATCH v3 1/3] x86/hvm: Rename enum hvm_copy_result to hvm_translation_result
  2017-09-19 14:14 ` [PATCH v3 1/3] x86/hvm: Rename enum hvm_copy_result to hvm_translation_result Alexandru Isaila
@ 2017-09-19 14:16   ` Paul Durrant
  2017-09-19 14:39   ` Jan Beulich
  1 sibling, 0 replies; 12+ messages in thread
From: Paul Durrant @ 2017-09-19 14:16 UTC (permalink / raw)
  To: 'Alexandru Isaila', xen-devel
  Cc: Kevin Tian, sstabellini, Wei Liu, suravee.suthikulpanit,
	jun.nakajima, Andrew Cooper, Tim (Xen.org),
	George Dunlap, jbeulich, Ian Jackson, boris.ostrovsky

> -----Original Message-----
> From: Alexandru Isaila [mailto:aisaila@bitdefender.com]
> Sent: 19 September 2017 15:14
> To: xen-devel@lists.xen.org
> Cc: Tim (Xen.org) <tim@xen.org>; George Dunlap
> <George.Dunlap@citrix.com>; jbeulich@suse.com; Andrew Cooper
> <Andrew.Cooper3@citrix.com>; Ian Jackson <Ian.Jackson@citrix.com>;
> konrad.wilk@oracle.com; sstabellini@kernel.org; Wei Liu
> <wei.liu2@citrix.com>; Paul Durrant <Paul.Durrant@citrix.com>;
> boris.ostrovsky@oracle.com; suravee.suthikulpanit@amd.com;
> jun.nakajima@intel.com; Kevin Tian <kevin.tian@intel.com>
> Subject: [PATCH v3 1/3] x86/hvm: Rename enum hvm_copy_result to
> hvm_translation_result
> 
> From: Andrew Cooper <andrew.cooper3@citrix.com>
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> 
> ---
> Acked-by: Jan Beulich <jbeulich@suse.com>
> Reviewed-by: Kevin Tian <kevin.tian@intel.com>
> Acked-by: George Dunlap <george.dunlap@citrix.com>

Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

> ---
>  xen/arch/x86/hvm/dom0_build.c     |  2 +-
>  xen/arch/x86/hvm/emulate.c        | 40 ++++++++++++++--------------
>  xen/arch/x86/hvm/hvm.c            | 56 +++++++++++++++++++------------------
> --
>  xen/arch/x86/hvm/intercept.c      | 20 +++++++-------
>  xen/arch/x86/hvm/svm/nestedsvm.c  |  5 ++--
>  xen/arch/x86/hvm/svm/svm.c        |  2 +-
>  xen/arch/x86/hvm/viridian.c       |  2 +-
>  xen/arch/x86/hvm/vmsi.c           |  2 +-
>  xen/arch/x86/hvm/vmx/realmode.c   |  2 +-
>  xen/arch/x86/hvm/vmx/vvmx.c       | 14 +++++-----
>  xen/arch/x86/mm/shadow/common.c   | 12 ++++-----
>  xen/common/libelf/libelf-loader.c |  4 +--
>  xen/include/asm-x86/hvm/support.h | 40 ++++++++++++++--------------
>  13 files changed, 101 insertions(+), 100 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/dom0_build.c
> b/xen/arch/x86/hvm/dom0_build.c
> index 020c355..e8f746c 100644
> --- a/xen/arch/x86/hvm/dom0_build.c
> +++ b/xen/arch/x86/hvm/dom0_build.c
> @@ -238,7 +238,7 @@ static int __init
> pvh_setup_vmx_realmode_helpers(struct domain *d)
>      if ( !pvh_steal_ram(d, HVM_VM86_TSS_SIZE, 128, GB(4), &gaddr) )
>      {
>          if ( hvm_copy_to_guest_phys(gaddr, NULL, HVM_VM86_TSS_SIZE, v) !=
> -             HVMCOPY_okay )
> +             HVMTRANS_okay )
>              printk("Unable to zero VM86 TSS area\n");
>          d->arch.hvm_domain.params[HVM_PARAM_VM86_TSS_SIZED] =
>              VM86_TSS_UPDATED | ((uint64_t)HVM_VM86_TSS_SIZE << 32) |
> gaddr;
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index 54811c1..cc874ce 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -100,7 +100,7 @@ static int ioreq_server_read(const struct
> hvm_io_handler *io_handler,
>                      uint32_t size,
>                      uint64_t *data)
>  {
> -    if ( hvm_copy_from_guest_phys(data, addr, size) != HVMCOPY_okay )
> +    if ( hvm_copy_from_guest_phys(data, addr, size) != HVMTRANS_okay )
>          return X86EMUL_UNHANDLEABLE;
> 
>      return X86EMUL_OKAY;
> @@ -893,18 +893,18 @@ static int __hvmemul_read(
> 
>      switch ( rc )
>      {
> -    case HVMCOPY_okay:
> +    case HVMTRANS_okay:
>          break;
> -    case HVMCOPY_bad_gva_to_gfn:
> +    case HVMTRANS_bad_linear_to_gfn:
>          x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
>          return X86EMUL_EXCEPTION;
> -    case HVMCOPY_bad_gfn_to_mfn:
> +    case HVMTRANS_bad_gfn_to_mfn:
>          if ( access_type == hvm_access_insn_fetch )
>              return X86EMUL_UNHANDLEABLE;
> 
>          return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
> hvmemul_ctxt, 0);
> -    case HVMCOPY_gfn_paged_out:
> -    case HVMCOPY_gfn_shared:
> +    case HVMTRANS_gfn_paged_out:
> +    case HVMTRANS_gfn_shared:
>          return X86EMUL_RETRY;
>      default:
>          return X86EMUL_UNHANDLEABLE;
> @@ -1012,15 +1012,15 @@ static int hvmemul_write(
> 
>      switch ( rc )
>      {
> -    case HVMCOPY_okay:
> +    case HVMTRANS_okay:
>          break;
> -    case HVMCOPY_bad_gva_to_gfn:
> +    case HVMTRANS_bad_linear_to_gfn:
>          x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
>          return X86EMUL_EXCEPTION;
> -    case HVMCOPY_bad_gfn_to_mfn:
> +    case HVMTRANS_bad_gfn_to_mfn:
>          return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
> hvmemul_ctxt, 0);
> -    case HVMCOPY_gfn_paged_out:
> -    case HVMCOPY_gfn_shared:
> +    case HVMTRANS_gfn_paged_out:
> +    case HVMTRANS_gfn_shared:
>          return X86EMUL_RETRY;
>      default:
>          return X86EMUL_UNHANDLEABLE;
> @@ -1384,7 +1384,7 @@ static int hvmemul_rep_movs(
>              return rc;
>          }
> 
> -        rc = HVMCOPY_okay;
> +        rc = HVMTRANS_okay;
>      }
>      else
>          /*
> @@ -1394,16 +1394,16 @@ static int hvmemul_rep_movs(
>           */
>          rc = hvm_copy_from_guest_phys(buf, sgpa, bytes);
> 
> -    if ( rc == HVMCOPY_okay )
> +    if ( rc == HVMTRANS_okay )
>          rc = hvm_copy_to_guest_phys(dgpa, buf, bytes, current);
> 
>      xfree(buf);
> 
> -    if ( rc == HVMCOPY_gfn_paged_out )
> +    if ( rc == HVMTRANS_gfn_paged_out )
>          return X86EMUL_RETRY;
> -    if ( rc == HVMCOPY_gfn_shared )
> +    if ( rc == HVMTRANS_gfn_shared )
>          return X86EMUL_RETRY;
> -    if ( rc != HVMCOPY_okay )
> +    if ( rc != HVMTRANS_okay )
>      {
>          gdprintk(XENLOG_WARNING, "Failed memory-to-memory REP MOVS:
> sgpa=%"
>                   PRIpaddr" dgpa=%"PRIpaddr" reps=%lu bytes_per_rep=%u\n",
> @@ -1513,10 +1513,10 @@ static int hvmemul_rep_stos(
> 
>          switch ( rc )
>          {
> -        case HVMCOPY_gfn_paged_out:
> -        case HVMCOPY_gfn_shared:
> +        case HVMTRANS_gfn_paged_out:
> +        case HVMTRANS_gfn_shared:
>              return X86EMUL_RETRY;
> -        case HVMCOPY_okay:
> +        case HVMTRANS_okay:
>              return X86EMUL_OKAY;
>          }
> 
> @@ -2172,7 +2172,7 @@ void hvm_emulate_init_per_insn(
>                                          &addr) &&
>               hvm_fetch_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
>                                           sizeof(hvmemul_ctxt->insn_buf),
> -                                         pfec, NULL) == HVMCOPY_okay) ?
> +                                         pfec, NULL) == HVMTRANS_okay) ?
>              sizeof(hvmemul_ctxt->insn_buf) : 0;
>      }
>      else
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 6cb903d..488acbf 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2915,9 +2915,9 @@ void hvm_task_switch(
> 
>      rc = hvm_copy_from_guest_linear(
>          &tss, prev_tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
> -    if ( rc == HVMCOPY_bad_gva_to_gfn )
> +    if ( rc == HVMTRANS_bad_linear_to_gfn )
>          hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
> -    if ( rc != HVMCOPY_okay )
> +    if ( rc != HVMTRANS_okay )
>          goto out;
> 
>      eflags = regs->eflags;
> @@ -2955,20 +2955,20 @@ void hvm_task_switch(
>                                    offsetof(typeof(tss), trace) -
>                                    offsetof(typeof(tss), eip),
>                                    PFEC_page_present, &pfinfo);
> -    if ( rc == HVMCOPY_bad_gva_to_gfn )
> +    if ( rc == HVMTRANS_bad_linear_to_gfn )
>          hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
> -    if ( rc != HVMCOPY_okay )
> +    if ( rc != HVMTRANS_okay )
>          goto out;
> 
>      rc = hvm_copy_from_guest_linear(
>          &tss, tr.base, sizeof(tss), PFEC_page_present, &pfinfo);
> -    if ( rc == HVMCOPY_bad_gva_to_gfn )
> +    if ( rc == HVMTRANS_bad_linear_to_gfn )
>          hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
>      /*
> -     * Note: The HVMCOPY_gfn_shared case could be optimised, if the callee
> +     * Note: The HVMTRANS_gfn_shared case could be optimised, if the
> callee
>       * functions knew we want RO access.
>       */
> -    if ( rc != HVMCOPY_okay )
> +    if ( rc != HVMTRANS_okay )
>          goto out;
> 
>      new_cpl = tss.eflags & X86_EFLAGS_VM ? 3 : tss.cs & 3;
> @@ -3010,12 +3010,12 @@ void hvm_task_switch(
>          rc = hvm_copy_to_guest_linear(tr.base + offsetof(typeof(tss),
> back_link),
>                                        &tss.back_link, sizeof(tss.back_link), 0,
>                                        &pfinfo);
> -        if ( rc == HVMCOPY_bad_gva_to_gfn )
> +        if ( rc == HVMTRANS_bad_linear_to_gfn )
>          {
>              hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
>              exn_raised = 1;
>          }
> -        else if ( rc != HVMCOPY_okay )
> +        else if ( rc != HVMTRANS_okay )
>              goto out;
>      }
> 
> @@ -3051,12 +3051,12 @@ void hvm_task_switch(
>          {
>              rc = hvm_copy_to_guest_linear(linear_addr, &errcode, opsz, 0,
>                                            &pfinfo);
> -            if ( rc == HVMCOPY_bad_gva_to_gfn )
> +            if ( rc == HVMTRANS_bad_linear_to_gfn )
>              {
>                  hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
>                  exn_raised = 1;
>              }
> -            else if ( rc != HVMCOPY_okay )
> +            else if ( rc != HVMTRANS_okay )
>                  goto out;
>          }
>      }
> @@ -3073,7 +3073,7 @@ void hvm_task_switch(
>  #define HVMCOPY_to_guest   (1u<<0)
>  #define HVMCOPY_phys       (0u<<2)
>  #define HVMCOPY_linear     (1u<<2)
> -static enum hvm_copy_result __hvm_copy(
> +static enum hvm_translation_result __hvm_copy(
>      void *buf, paddr_t addr, int size, struct vcpu *v, unsigned int flags,
>      uint32_t pfec, pagefault_info_t *pfinfo)
>  {
> @@ -3098,7 +3098,7 @@ static enum hvm_copy_result __hvm_copy(
>       * Hence we bail immediately if called from atomic context.
>       */
>      if ( in_atomic() )
> -        return HVMCOPY_unhandleable;
> +        return HVMTRANS_unhandleable;
>  #endif
> 
>      while ( todo > 0 )
> @@ -3113,15 +3113,15 @@ static enum hvm_copy_result __hvm_copy(
>              if ( gfn == gfn_x(INVALID_GFN) )
>              {
>                  if ( pfec & PFEC_page_paged )
> -                    return HVMCOPY_gfn_paged_out;
> +                    return HVMTRANS_gfn_paged_out;
>                  if ( pfec & PFEC_page_shared )
> -                    return HVMCOPY_gfn_shared;
> +                    return HVMTRANS_gfn_shared;
>                  if ( pfinfo )
>                  {
>                      pfinfo->linear = addr;
>                      pfinfo->ec = pfec & ~PFEC_implicit;
>                  }
> -                return HVMCOPY_bad_gva_to_gfn;
> +                return HVMTRANS_bad_linear_to_gfn;
>              }
>              gpa |= (paddr_t)gfn << PAGE_SHIFT;
>          }
> @@ -3139,28 +3139,28 @@ static enum hvm_copy_result __hvm_copy(
>          if ( v == current
>               && !nestedhvm_vcpu_in_guestmode(v)
>               && hvm_mmio_internal(gpa) )
> -            return HVMCOPY_bad_gfn_to_mfn;
> +            return HVMTRANS_bad_gfn_to_mfn;
> 
>          page = get_page_from_gfn(v->domain, gfn, &p2mt, P2M_UNSHARE);
> 
>          if ( !page )
> -            return HVMCOPY_bad_gfn_to_mfn;
> +            return HVMTRANS_bad_gfn_to_mfn;
> 
>          if ( p2m_is_paging(p2mt) )
>          {
>              put_page(page);
>              p2m_mem_paging_populate(v->domain, gfn);
> -            return HVMCOPY_gfn_paged_out;
> +            return HVMTRANS_gfn_paged_out;
>          }
>          if ( p2m_is_shared(p2mt) )
>          {
>              put_page(page);
> -            return HVMCOPY_gfn_shared;
> +            return HVMTRANS_gfn_shared;
>          }
>          if ( p2m_is_grant(p2mt) )
>          {
>              put_page(page);
> -            return HVMCOPY_unhandleable;
> +            return HVMTRANS_unhandleable;
>          }
> 
>          p = (char *)__map_domain_page(page) + (addr & ~PAGE_MASK);
> @@ -3198,24 +3198,24 @@ static enum hvm_copy_result __hvm_copy(
>          put_page(page);
>      }
> 
> -    return HVMCOPY_okay;
> +    return HVMTRANS_okay;
>  }
> 
> -enum hvm_copy_result hvm_copy_to_guest_phys(
> +enum hvm_translation_result hvm_copy_to_guest_phys(
>      paddr_t paddr, void *buf, int size, struct vcpu *v)
>  {
>      return __hvm_copy(buf, paddr, size, v,
>                        HVMCOPY_to_guest | HVMCOPY_phys, 0, NULL);
>  }
> 
> -enum hvm_copy_result hvm_copy_from_guest_phys(
> +enum hvm_translation_result hvm_copy_from_guest_phys(
>      void *buf, paddr_t paddr, int size)
>  {
>      return __hvm_copy(buf, paddr, size, current,
>                        HVMCOPY_from_guest | HVMCOPY_phys, 0, NULL);
>  }
> 
> -enum hvm_copy_result hvm_copy_to_guest_linear(
> +enum hvm_translation_result hvm_copy_to_guest_linear(
>      unsigned long addr, void *buf, int size, uint32_t pfec,
>      pagefault_info_t *pfinfo)
>  {
> @@ -3224,7 +3224,7 @@ enum hvm_copy_result hvm_copy_to_guest_linear(
>                        PFEC_page_present | PFEC_write_access | pfec, pfinfo);
>  }
> 
> -enum hvm_copy_result hvm_copy_from_guest_linear(
> +enum hvm_translation_result hvm_copy_from_guest_linear(
>      void *buf, unsigned long addr, int size, uint32_t pfec,
>      pagefault_info_t *pfinfo)
>  {
> @@ -3233,7 +3233,7 @@ enum hvm_copy_result hvm_copy_from_guest_linear(
>                        PFEC_page_present | pfec, pfinfo);
>  }
> 
> -enum hvm_copy_result hvm_fetch_from_guest_linear(
> +enum hvm_translation_result hvm_fetch_from_guest_linear(
>      void *buf, unsigned long addr, int size, uint32_t pfec,
>      pagefault_info_t *pfinfo)
>  {
> @@ -3670,7 +3670,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
>                                          sizeof(sig), hvm_access_insn_fetch,
>                                          cs, &addr) &&
>               (hvm_fetch_from_guest_linear(sig, addr, sizeof(sig),
> -                                          walk, NULL) == HVMCOPY_okay) &&
> +                                          walk, NULL) == HVMTRANS_okay) &&
>               (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
>          {
>              regs->rip += sizeof(sig);
> diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
> index e51efd5..ef82419 100644
> --- a/xen/arch/x86/hvm/intercept.c
> +++ b/xen/arch/x86/hvm/intercept.c
> @@ -136,14 +136,14 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
>                  switch ( hvm_copy_to_guest_phys(p->data + step * i,
>                                                  &data, p->size, current) )
>                  {
> -                case HVMCOPY_okay:
> +                case HVMTRANS_okay:
>                      break;
> -                case HVMCOPY_bad_gfn_to_mfn:
> +                case HVMTRANS_bad_gfn_to_mfn:
>                      /* Drop the write as real hardware would. */
>                      continue;
> -                case HVMCOPY_bad_gva_to_gfn:
> -                case HVMCOPY_gfn_paged_out:
> -                case HVMCOPY_gfn_shared:
> +                case HVMTRANS_bad_linear_to_gfn:
> +                case HVMTRANS_gfn_paged_out:
> +                case HVMTRANS_gfn_shared:
>                      ASSERT_UNREACHABLE();
>                      /* fall through */
>                  default:
> @@ -164,14 +164,14 @@ int hvm_process_io_intercept(const struct hvm_io_handler *handler,
>                  switch ( hvm_copy_from_guest_phys(&data, p->data + step * i,
>                                                    p->size) )
>                  {
> -                case HVMCOPY_okay:
> +                case HVMTRANS_okay:
>                      break;
> -                case HVMCOPY_bad_gfn_to_mfn:
> +                case HVMTRANS_bad_gfn_to_mfn:
>                      data = ~0;
>                      break;
> -                case HVMCOPY_bad_gva_to_gfn:
> -                case HVMCOPY_gfn_paged_out:
> -                case HVMCOPY_gfn_shared:
> +                case HVMTRANS_bad_linear_to_gfn:
> +                case HVMTRANS_gfn_paged_out:
> +                case HVMTRANS_gfn_shared:
>                      ASSERT_UNREACHABLE();
>                      /* fall through */
>                  default:
> diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
> index 8fd9c23..66a1777 100644
> --- a/xen/arch/x86/hvm/svm/nestedsvm.c
> +++ b/xen/arch/x86/hvm/svm/nestedsvm.c
> @@ -357,7 +357,7 @@ static int nsvm_vmrun_permissionmap(struct vcpu *v, bool_t viopm)
>      struct vmcb_struct *host_vmcb = arch_svm->vmcb;
>      unsigned long *ns_msrpm_ptr;
>      unsigned int i;
> -    enum hvm_copy_result ret;
> +    enum hvm_translation_result ret;
>      unsigned long *ns_viomap;
>      bool_t ioport_80 = 1, ioport_ed = 1;
> 
> @@ -365,7 +365,8 @@ static int nsvm_vmrun_permissionmap(struct vcpu *v, bool_t viopm)
> 
>      ret = hvm_copy_from_guest_phys(svm->ns_cached_msrpm,
>                                     ns_vmcb->_msrpm_base_pa, MSRPM_SIZE);
> -    if (ret != HVMCOPY_okay) {
> +    if ( ret != HVMTRANS_okay )
> +    {
>          gdprintk(XENLOG_ERR, "hvm_copy_from_guest_phys msrpm %u\n", ret);
>          return 1;
>      }
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index 6b19b16..12ddc8a 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -1266,7 +1266,7 @@ static void svm_emul_swint_injection(struct x86_event *event)
>                                      PFEC_implicit, &pfinfo);
>      if ( rc )
>      {
> -        if ( rc == HVMCOPY_bad_gva_to_gfn )
> +        if ( rc == HVMTRANS_bad_linear_to_gfn )
>          {
>              fault = TRAP_page_fault;
>              ec = pfinfo.ec;
> diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
> index e0546f3..f0fa59d 100644
> --- a/xen/arch/x86/hvm/viridian.c
> +++ b/xen/arch/x86/hvm/viridian.c
> @@ -914,7 +914,7 @@ int viridian_hypercall(struct cpu_user_regs *regs)
> 
>          /* Get input parameters. */
>          if ( hvm_copy_from_guest_phys(&input_params, input_params_gpa,
> -                                      sizeof(input_params)) != HVMCOPY_okay )
> +                                      sizeof(input_params)) != HVMTRANS_okay )
>              break;
> 
>          /*
> diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
> index 9b35e9b..7126de7 100644
> --- a/xen/arch/x86/hvm/vmsi.c
> +++ b/xen/arch/x86/hvm/vmsi.c
> @@ -609,7 +609,7 @@ void msix_write_completion(struct vcpu *v)
>          if ( desc &&
>               hvm_copy_from_guest_phys(&data,
>                                        v->arch.hvm_vcpu.hvm_io.msix_snoop_gpa,
> -                                      sizeof(data)) == HVMCOPY_okay &&
> +                                      sizeof(data)) == HVMTRANS_okay &&
>               !(data & PCI_MSIX_VECTOR_BITMASK) )
>              ctrl_address = snoop_addr;
>      }
> diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
> index 11bde58..12d43ad 100644
> --- a/xen/arch/x86/hvm/vmx/realmode.c
> +++ b/xen/arch/x86/hvm/vmx/realmode.c
> @@ -40,7 +40,7 @@ static void realmode_deliver_exception(
>      last_byte = (vector * 4) + 3;
>      if ( idtr->limit < last_byte ||
>           hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4) !=
> -         HVMCOPY_okay )
> +         HVMTRANS_okay )
>      {
>          /* Software interrupt? */
>          if ( insn_len != 0 )
> diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
> index e2361a1..cd0ee0a 100644
> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> @@ -481,9 +481,9 @@ static int decode_vmx_inst(struct cpu_user_regs *regs,
>              int rc = hvm_copy_from_guest_linear(poperandS, base, size,
>                                                  0, &pfinfo);
> 
> -            if ( rc == HVMCOPY_bad_gva_to_gfn )
> +            if ( rc == HVMTRANS_bad_linear_to_gfn )
>                  hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
> -            if ( rc != HVMCOPY_okay )
> +            if ( rc != HVMTRANS_okay )
>                  return X86EMUL_EXCEPTION;
>          }
>          decode->mem = base;
> @@ -1468,7 +1468,7 @@ int nvmx_handle_vmxon(struct cpu_user_regs *regs)
>      }
> 
>      rc = hvm_copy_from_guest_phys(&nvmcs_revid, gpa, sizeof(nvmcs_revid));
> -    if ( rc != HVMCOPY_okay ||
> +    if ( rc != HVMTRANS_okay ||
>           (nvmcs_revid & ~VMX_BASIC_REVISION_MASK) ||
>           ((nvmcs_revid ^ vmx_basic_msr) & VMX_BASIC_REVISION_MASK) )
>      {
> @@ -1746,9 +1746,9 @@ int nvmx_handle_vmptrst(struct cpu_user_regs *regs)
>      gpa = nvcpu->nv_vvmcxaddr;
> 
>      rc = hvm_copy_to_guest_linear(decode.mem, &gpa, decode.len, 0, &pfinfo);
> -    if ( rc == HVMCOPY_bad_gva_to_gfn )
> +    if ( rc == HVMTRANS_bad_linear_to_gfn )
>          hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
> -    if ( rc != HVMCOPY_okay )
> +    if ( rc != HVMTRANS_okay )
>          return X86EMUL_EXCEPTION;
> 
>      vmsucceed(regs);
> @@ -1835,9 +1835,9 @@ int nvmx_handle_vmread(struct cpu_user_regs *regs)
>      switch ( decode.type ) {
>      case VMX_INST_MEMREG_TYPE_MEMORY:
>          rc = hvm_copy_to_guest_linear(decode.mem, &value, decode.len, 0, &pfinfo);
> -        if ( rc == HVMCOPY_bad_gva_to_gfn )
> +        if ( rc == HVMTRANS_bad_linear_to_gfn )
>              hvm_inject_page_fault(pfinfo.ec, pfinfo.linear);
> -        if ( rc != HVMCOPY_okay )
> +        if ( rc != HVMTRANS_okay )
>              return X86EMUL_EXCEPTION;
>          break;
>      case VMX_INST_MEMREG_TYPE_REG:
> diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
> index 3926ed6..8b9310c 100644
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -196,16 +196,16 @@ hvm_read(enum x86_segment seg,
> 
>      switch ( rc )
>      {
> -    case HVMCOPY_okay:
> +    case HVMTRANS_okay:
>          return X86EMUL_OKAY;
> -    case HVMCOPY_bad_gva_to_gfn:
> +    case HVMTRANS_bad_linear_to_gfn:
>          x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &sh_ctxt->ctxt);
>          return X86EMUL_EXCEPTION;
> -    case HVMCOPY_bad_gfn_to_mfn:
> -    case HVMCOPY_unhandleable:
> +    case HVMTRANS_bad_gfn_to_mfn:
> +    case HVMTRANS_unhandleable:
>          return X86EMUL_UNHANDLEABLE;
> -    case HVMCOPY_gfn_paged_out:
> -    case HVMCOPY_gfn_shared:
> +    case HVMTRANS_gfn_paged_out:
> +    case HVMTRANS_gfn_shared:
>          return X86EMUL_RETRY;
>      }
> 
> diff --git a/xen/common/libelf/libelf-loader.c b/xen/common/libelf/libelf-loader.c
> index c8b7ec9..0f46872 100644
> --- a/xen/common/libelf/libelf-loader.c
> +++ b/xen/common/libelf/libelf-loader.c
> @@ -154,10 +154,10 @@ static elf_errorstatus elf_memcpy(struct vcpu *v, void *dst, void *src,
>  #ifdef CONFIG_X86
>      if ( is_hvm_vcpu(v) )
>      {
> -        enum hvm_copy_result rc;
> +        enum hvm_translation_result rc;
> 
>          rc = hvm_copy_to_guest_phys((paddr_t)dst, src, size, v);
> -        return rc != HVMCOPY_okay ? -1 : 0;
> +        return rc != HVMTRANS_okay ? -1 : 0;
>      }
>  #endif
> 
> diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
> index b18dbb6..e3b035d 100644
> --- a/xen/include/asm-x86/hvm/support.h
> +++ b/xen/include/asm-x86/hvm/support.h
> @@ -53,23 +53,23 @@ extern unsigned int opt_hvm_debug_level;
> 
>  extern unsigned long hvm_io_bitmap[];
> 
> -enum hvm_copy_result {
> -    HVMCOPY_okay = 0,
> -    HVMCOPY_bad_gva_to_gfn,
> -    HVMCOPY_bad_gfn_to_mfn,
> -    HVMCOPY_unhandleable,
> -    HVMCOPY_gfn_paged_out,
> -    HVMCOPY_gfn_shared,
> +enum hvm_translation_result {
> +    HVMTRANS_okay,
> +    HVMTRANS_bad_linear_to_gfn,
> +    HVMTRANS_bad_gfn_to_mfn,
> +    HVMTRANS_unhandleable,
> +    HVMTRANS_gfn_paged_out,
> +    HVMTRANS_gfn_shared,
>  };
> 
>  /*
>   * Copy to/from a guest physical address.
> - * Returns HVMCOPY_okay, else HVMCOPY_bad_gfn_to_mfn if the given physical
> + * Returns HVMTRANS_okay, else HVMTRANS_bad_gfn_to_mfn if the given physical
>   * address range does not map entirely onto ordinary machine memory.
>   */
> -enum hvm_copy_result hvm_copy_to_guest_phys(
> +enum hvm_translation_result hvm_copy_to_guest_phys(
>      paddr_t paddr, void *buf, int size, struct vcpu *v);
> -enum hvm_copy_result hvm_copy_from_guest_phys(
> +enum hvm_translation_result hvm_copy_from_guest_phys(
>      void *buf, paddr_t paddr, int size);
> 
>  /*
> @@ -79,13 +79,13 @@ enum hvm_copy_result hvm_copy_from_guest_phys(
>   * to set them.
>   *
>   * Returns:
> - *  HVMCOPY_okay: Copy was entirely successful.
> - *  HVMCOPY_bad_gfn_to_mfn: Some guest physical address did not map to
> - *                          ordinary machine memory.
> - *  HVMCOPY_bad_gva_to_gfn: Some guest virtual address did not have a valid
> - *                          mapping to a guest physical address.  The
> - *                          pagefault_info_t structure will be filled in if
> - *                          provided.
> + *  HVMTRANS_okay: Copy was entirely successful.
> + *  HVMTRANS_bad_gfn_to_mfn: Some guest physical address did not map to
> + *                           ordinary machine memory.
> + *  HVMTRANS_bad_linear_to_gfn: Some guest linear address did not have a
> + *                              valid mapping to a guest physical address.
> + *                              The pagefault_info_t structure will be filled
> + *                              in if provided.
>   */
>  typedef struct pagefault_info
>  {
> @@ -93,13 +93,13 @@ typedef struct pagefault_info
>      int ec;
>  } pagefault_info_t;
> 
> -enum hvm_copy_result hvm_copy_to_guest_linear(
> +enum hvm_translation_result hvm_copy_to_guest_linear(
>      unsigned long addr, void *buf, int size, uint32_t pfec,
>      pagefault_info_t *pfinfo);
> -enum hvm_copy_result hvm_copy_from_guest_linear(
> +enum hvm_translation_result hvm_copy_from_guest_linear(
>      void *buf, unsigned long addr, int size, uint32_t pfec,
>      pagefault_info_t *pfinfo);
> -enum hvm_copy_result hvm_fetch_from_guest_linear(
> +enum hvm_translation_result hvm_fetch_from_guest_linear(
>      void *buf, unsigned long addr, int size, uint32_t pfec,
>      pagefault_info_t *pfinfo);
> 
> --
> 2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v3 2/3] x86/hvm: Break out __hvm_copy()'s translation logic
  2017-09-19 14:14 ` [PATCH v3 2/3] x86/hvm: Break out __hvm_copy()'s translation logic Alexandru Isaila
@ 2017-09-19 14:27   ` Paul Durrant
  2017-09-19 14:35     ` Jan Beulich
  0 siblings, 1 reply; 12+ messages in thread
From: Paul Durrant @ 2017-09-19 14:27 UTC (permalink / raw)
  To: 'Alexandru Isaila', xen-devel
  Cc: Kevin Tian, sstabellini, Wei Liu, suravee.suthikulpanit,
	jun.nakajima, Andrew Cooper, Tim (Xen.org),
	George Dunlap, jbeulich, Ian Jackson, boris.ostrovsky

> -----Original Message-----
> From: Alexandru Isaila [mailto:aisaila@bitdefender.com]
> Sent: 19 September 2017 15:14
> To: xen-devel@lists.xen.org
> Cc: Tim (Xen.org) <tim@xen.org>; George Dunlap
> <George.Dunlap@citrix.com>; jbeulich@suse.com; Andrew Cooper
> <Andrew.Cooper3@citrix.com>; Ian Jackson <Ian.Jackson@citrix.com>;
> konrad.wilk@oracle.com; sstabellini@kernel.org; Wei Liu
> <wei.liu2@citrix.com>; Paul Durrant <Paul.Durrant@citrix.com>;
> boris.ostrovsky@oracle.com; suravee.suthikulpanit@amd.com;
> jun.nakajima@intel.com; Kevin Tian <kevin.tian@intel.com>; Alexandru Isaila
> <aisaila@bitdefender.com>
> Subject: [PATCH v3 2/3] x86/hvm: Break out __hvm_copy()'s translation logic
> 
> From: Andrew Cooper <andrew.cooper3@citrix.com>
> 
> It will be reused by later changes.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

> 
> ---
> Changes since V2:
> 	- Changed _gfn() to gaddr_to_gfn
> 	- Changed gfn_x to gfn_to_gaddr
> ---
>  xen/arch/x86/hvm/hvm.c            | 144 +++++++++++++++++++++++---------------
>  xen/include/asm-x86/hvm/support.h |  12 ++++
>  2 files changed, 98 insertions(+), 58 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 488acbf..93394c1 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -3069,6 +3069,83 @@ void hvm_task_switch(
>      hvm_unmap_entry(nptss_desc);
>  }
> 
> +enum hvm_translation_result hvm_translate_get_page(
> +    struct vcpu *v, unsigned long addr, bool linear, uint32_t pfec,
> +    pagefault_info_t *pfinfo, struct page_info **page_p,
> +    gfn_t *gfn_p, p2m_type_t *p2mt_p)
> +{
> +    struct page_info *page;
> +    p2m_type_t p2mt;
> +    gfn_t gfn;
> +
> +    if ( linear )
> +    {
> +        gfn = _gfn(paging_gva_to_gfn(v, addr, &pfec));
> +
> +        if ( gfn_eq(gfn, INVALID_GFN) )
> +        {
> +            if ( pfec & PFEC_page_paged )
> +                return HVMTRANS_gfn_paged_out;
> +
> +            if ( pfec & PFEC_page_shared )
> +                return HVMTRANS_gfn_shared;
> +
> +            if ( pfinfo )
> +            {
> +                pfinfo->linear = addr;
> +                pfinfo->ec = pfec & ~PFEC_implicit;
> +            }
> +
> +            return HVMTRANS_bad_linear_to_gfn;
> +        }
> +    }
> +    else
> +    {
> +        gfn = gaddr_to_gfn(addr);
> +        ASSERT(!pfinfo);
> +    }
> +
> +    /*
> +     * No need to do the P2M lookup for internally handled MMIO, benefiting
> +     * - 32-bit WinXP (& older Windows) on AMD CPUs for LAPIC accesses,
> +     * - newer Windows (like Server 2012) for HPET accesses.
> +     */
> +    if ( v == current
> +         && !nestedhvm_vcpu_in_guestmode(v)
> +         && hvm_mmio_internal(gfn_to_gaddr(gfn)) )
> +        return HVMTRANS_bad_gfn_to_mfn;
> +
> +    page = get_page_from_gfn(v->domain, gfn_x(gfn), &p2mt, P2M_UNSHARE);
> +
> +    if ( !page )
> +        return HVMTRANS_bad_gfn_to_mfn;
> +
> +    if ( p2m_is_paging(p2mt) )
> +    {
> +        put_page(page);
> +        p2m_mem_paging_populate(v->domain, gfn_x(gfn));
> +        return HVMTRANS_gfn_paged_out;
> +    }
> +    if ( p2m_is_shared(p2mt) )
> +    {
> +        put_page(page);
> +        return HVMTRANS_gfn_shared;
> +    }
> +    if ( p2m_is_grant(p2mt) )
> +    {
> +        put_page(page);
> +        return HVMTRANS_unhandleable;
> +    }
> +
> +    *page_p = page;
> +    if ( gfn_p )
> +        *gfn_p = gfn;
> +    if ( p2mt_p )
> +        *p2mt_p = p2mt;
> +
> +    return HVMTRANS_okay;
> +}
> +
>  #define HVMCOPY_from_guest (0u<<0)
>  #define HVMCOPY_to_guest   (1u<<0)
>  #define HVMCOPY_phys       (0u<<2)
> @@ -3077,7 +3154,7 @@ static enum hvm_translation_result __hvm_copy(
>      void *buf, paddr_t addr, int size, struct vcpu *v, unsigned int flags,
>      uint32_t pfec, pagefault_info_t *pfinfo)
>  {
> -    unsigned long gfn;
> +    gfn_t gfn;
>      struct page_info *page;
>      p2m_type_t p2mt;
>      char *p;
> @@ -3103,65 +3180,15 @@ static enum hvm_translation_result __hvm_copy(
> 
>      while ( todo > 0 )
>      {
> +        enum hvm_translation_result res;
>          paddr_t gpa = addr & ~PAGE_MASK;
> 
>          count = min_t(int, PAGE_SIZE - gpa, todo);
> 
> -        if ( flags & HVMCOPY_linear )
> -        {
> -            gfn = paging_gva_to_gfn(v, addr, &pfec);
> -            if ( gfn == gfn_x(INVALID_GFN) )
> -            {
> -                if ( pfec & PFEC_page_paged )
> -                    return HVMTRANS_gfn_paged_out;
> -                if ( pfec & PFEC_page_shared )
> -                    return HVMTRANS_gfn_shared;
> -                if ( pfinfo )
> -                {
> -                    pfinfo->linear = addr;
> -                    pfinfo->ec = pfec & ~PFEC_implicit;
> -                }
> -                return HVMTRANS_bad_linear_to_gfn;
> -            }
> -            gpa |= (paddr_t)gfn << PAGE_SHIFT;
> -        }
> -        else
> -        {
> -            gfn = addr >> PAGE_SHIFT;
> -            gpa = addr;
> -        }
> -
> -        /*
> -         * No need to do the P2M lookup for internally handled MMIO, benefiting
> -         * - 32-bit WinXP (& older Windows) on AMD CPUs for LAPIC accesses,
> -         * - newer Windows (like Server 2012) for HPET accesses.
> -         */
> -        if ( v == current
> -             && !nestedhvm_vcpu_in_guestmode(v)
> -             && hvm_mmio_internal(gpa) )
> -            return HVMTRANS_bad_gfn_to_mfn;
> -
> -        page = get_page_from_gfn(v->domain, gfn, &p2mt, P2M_UNSHARE);
> -
> -        if ( !page )
> -            return HVMTRANS_bad_gfn_to_mfn;
> -
> -        if ( p2m_is_paging(p2mt) )
> -        {
> -            put_page(page);
> -            p2m_mem_paging_populate(v->domain, gfn);
> -            return HVMTRANS_gfn_paged_out;
> -        }
> -        if ( p2m_is_shared(p2mt) )
> -        {
> -            put_page(page);
> -            return HVMTRANS_gfn_shared;
> -        }
> -        if ( p2m_is_grant(p2mt) )
> -        {
> -            put_page(page);
> -            return HVMTRANS_unhandleable;
> -        }
> +        res = hvm_translate_get_page(v, addr, flags & HVMCOPY_linear,
> +                                     pfec, pfinfo, &page, &gfn, &p2mt);
> +        if ( res != HVMTRANS_okay )
> +            return res;
> 
>          p = (char *)__map_domain_page(page) + (addr & ~PAGE_MASK);
> 
> @@ -3170,10 +3197,11 @@ static enum hvm_translation_result __hvm_copy(
>              if ( p2m_is_discard_write(p2mt) )
>              {
>                  static unsigned long lastpage;
> -                if ( xchg(&lastpage, gfn) != gfn )
> +
> +                if ( xchg(&lastpage, gfn_x(gfn)) != gfn_x(gfn) )
>                      dprintk(XENLOG_G_DEBUG,
>                              "%pv attempted write to read-only gfn %#lx (mfn=%#lx)\n",
> -                            v, gfn, page_to_mfn(page));
> +                            v, gfn_x(gfn), page_to_mfn(page));
>              }
>              else
>              {
> diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
> index e3b035d..d784fc1 100644
> --- a/xen/include/asm-x86/hvm/support.h
> +++ b/xen/include/asm-x86/hvm/support.h
> @@ -24,6 +24,7 @@
>  #include <xen/sched.h>
>  #include <asm/hvm/save.h>
>  #include <asm/processor.h>
> +#include <asm/p2m.h>
> 
>  #ifndef NDEBUG
>  #define DBG_LEVEL_0                 (1 << 0)
> @@ -103,6 +104,17 @@ enum hvm_translation_result hvm_fetch_from_guest_linear(
>      void *buf, unsigned long addr, int size, uint32_t pfec,
>      pagefault_info_t *pfinfo);
> 
> +/*
> + * Get a reference on the page under an HVM physical or linear address.  If
> + * linear, a pagewalk is performed using pfec (fault details optionally in
> + * pfinfo).
> + * On success, returns HVMTRANS_okay with a reference taken on **_page.
> + */
> +enum hvm_translation_result hvm_translate_get_page(
> +    struct vcpu *v, unsigned long addr, bool linear, uint32_t pfec,
> +    pagefault_info_t *pfinfo, struct page_info **page_p,
> +    gfn_t *gfn_p, p2m_type_t *p2mt_p);
> +
>  #define HVM_HCALL_completed  0 /* hypercall completed - no further action */
>  #define HVM_HCALL_preempted  1 /* hypercall preempted - re-execute VMCALL */
>  int hvm_hypercall(struct cpu_user_regs *regs);
> --
> 2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v3 2/3] x86/hvm: Break out __hvm_copy()'s translation logic
  2017-09-19 14:27   ` Paul Durrant
@ 2017-09-19 14:35     ` Jan Beulich
  0 siblings, 0 replies; 12+ messages in thread
From: Jan Beulich @ 2017-09-19 14:35 UTC (permalink / raw)
  To: 'Alexandru Isaila'
  Cc: Kevin Tian, sstabellini, Wei Liu, jun.nakajima, Andrew Cooper,
	Tim (Xen.org),
	George Dunlap, xen-devel, Paul Durrant, suravee.suthikulpanit,
	Ian Jackson, boris.ostrovsky

>>> On 19.09.17 at 16:27, <Paul.Durrant@citrix.com> wrote:
>> From: Alexandru Isaila [mailto:aisaila@bitdefender.com]
>> Sent: 19 September 2017 15:14
>> Subject: [PATCH v3 2/3] x86/hvm: Break out __hvm_copy()'s translation logic
>> 
>> From: Andrew Cooper <andrew.cooper3@citrix.com>
>> 
>> It will be reused by later changes.
>> 
>> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
>> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
> 
> Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

Acked-by: Jan Beulich <jbeulich@suse.com>



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v3 3/3] x86/hvm: Implement hvmemul_write() using real mappings
  2017-09-19 14:14 ` [PATCH v3 3/3] x86/hvm: Implement hvmemul_write() using real mappings Alexandru Isaila
@ 2017-09-19 14:38   ` Jan Beulich
  2017-09-19 14:39   ` Paul Durrant
  1 sibling, 0 replies; 12+ messages in thread
From: Jan Beulich @ 2017-09-19 14:38 UTC (permalink / raw)
  To: Alexandru Isaila
  Cc: tim, kevin.tian, sstabellini, wei.liu2, jun.nakajima,
	george.dunlap, andrew.cooper3, ian.jackson, xen-devel,
	paul.durrant, suravee.suthikulpanit, boris.ostrovsky

>>> On 19.09.17 at 16:14, <aisaila@bitdefender.com> wrote:
> From: Andrew Cooper <andrew.cooper3@citrix.com>
> 
> An access which crosses a page boundary is performed atomically by x86
> hardware, albeit with a severe performance penalty.  An important corner 
> case
> is when a straddled access hits two pages which differ in whether a
> translation exists, or in net access rights.
> 
> The use of hvm_copy*() in hvmemul_write() is problematic, because it 
> performs
> a translation then completes the partial write, before moving onto the next
> translation.
> 
> If an individual emulated write straddles two pages, the first of which is
> writable, and the second of which is not, the first half of the write will
> complete before #PF is raised from the second half.
> 
> This results in guest state corruption as a side effect of emulation, which
> has been observed to cause Windows to crash while under introspection.
> 
> Introduce the hvmemul_{,un}map_linear_addr() helpers, which translate the
> entire contents of a linear access, and vmap() the underlying frames to
> provide a contiguous virtual mapping for the emulator to use.  This is the
> same mechanism as used by the shadow emulation code.
> 
> This will catch any translation issues and abort the emulation before any
> modifications occur.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>

Reviewed-by: Jan Beulich <jbeulich@suse.com>
despite me being unhappy about ...

> +static void hvmemul_unmap_linear_addr(
> +    void *mapping, unsigned long linear, unsigned int bytes,

... "mapping" still being non-const here.

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v3 3/3] x86/hvm: Implement hvmemul_write() using real mappings
  2017-09-19 14:14 ` [PATCH v3 3/3] x86/hvm: Implement hvmemul_write() using real mappings Alexandru Isaila
  2017-09-19 14:38   ` Jan Beulich
@ 2017-09-19 14:39   ` Paul Durrant
  2017-09-19 14:52     ` Jan Beulich
  1 sibling, 1 reply; 12+ messages in thread
From: Paul Durrant @ 2017-09-19 14:39 UTC (permalink / raw)
  To: 'Alexandru Isaila', xen-devel
  Cc: Kevin Tian, sstabellini, Wei Liu, suravee.suthikulpanit,
	jun.nakajima, Andrew Cooper, Tim (Xen.org),
	George Dunlap, jbeulich, Ian Jackson, boris.ostrovsky

> -----Original Message-----
> From: Alexandru Isaila [mailto:aisaila@bitdefender.com]
> Sent: 19 September 2017 15:14
> To: xen-devel@lists.xen.org
> Cc: Tim (Xen.org) <tim@xen.org>; George Dunlap
> <George.Dunlap@citrix.com>; jbeulich@suse.com; Andrew Cooper
> <Andrew.Cooper3@citrix.com>; Ian Jackson <Ian.Jackson@citrix.com>;
> konrad.wilk@oracle.com; sstabellini@kernel.org; Wei Liu
> <wei.liu2@citrix.com>; Paul Durrant <Paul.Durrant@citrix.com>;
> boris.ostrovsky@oracle.com; suravee.suthikulpanit@amd.com;
> jun.nakajima@intel.com; Kevin Tian <kevin.tian@intel.com>; Alexandru Isaila
> <aisaila@bitdefender.com>
> Subject: [PATCH v3 3/3] x86/hvm: Implement hvmemul_write() using real
> mappings
> 
> From: Andrew Cooper <andrew.cooper3@citrix.com>
> 
> An access which crosses a page boundary is performed atomically by x86
> hardware, albeit with a severe performance penalty.  An important corner
> case
> is when a straddled access hits two pages which differ in whether a
> translation exists, or in net access rights.
> 
> The use of hvm_copy*() in hvmemul_write() is problematic, because it
> performs
> a translation then completes the partial write, before moving onto the next
> translation.
> 
> If an individual emulated write straddles two pages, the first of which is
> writable, and the second of which is not, the first half of the write will
> complete before #PF is raised from the second half.
> 
> This results in guest state corruption as a side effect of emulation, which
> has been observed to cause Windows to crash while under introspection.
> 
> Introduce the hvmemul_{,un}map_linear_addr() helpers, which translate the
> entire contents of a linear access, and vmap() the underlying frames to
> provide a contiguous virtual mapping for the emulator to use.  This is the
> same mechanism as used by the shadow emulation code.
> 
> This will catch any translation issues and abort the emulation before any
> modifications occur.
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
> 
> ---
> Changes since V2:
> 	- Added linear & ~PAGE_MASK to return statement
> 	- Modified mfn - hvmemul_ctxt->mfn to final - first + 1
> 	- Remove useless else statement
> ---
>  xen/arch/x86/hvm/emulate.c        | 177 ++++++++++++++++++++++++++++++++++----
>  xen/include/asm-x86/hvm/emulate.h |   7 ++
>  2 files changed, 167 insertions(+), 17 deletions(-)
> 
> diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
> index cc874ce..5574698 100644
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -498,6 +498,159 @@ static int hvmemul_do_mmio_addr(paddr_t mmio_gpa,
>  }
> 
>  /*
> + * Map the frame(s) covering an individual linear access, for writeable
> + * access.  May return NULL for MMIO, or ERR_PTR(~X86EMUL_*) for other errors
> + * including ERR_PTR(~X86EMUL_OKAY) for write-discard mappings.
> + *
> + * In debug builds, map() checks that each slot in hvmemul_ctxt->mfn[] is
> + * clean before use, and poisons unused slots with INVALID_MFN.
> + */
> +static void *hvmemul_map_linear_addr(
> +    unsigned long linear, unsigned int bytes, uint32_t pfec,
> +    struct hvm_emulate_ctxt *hvmemul_ctxt)
> +{
> +    struct vcpu *curr = current;
> +    void *err, *mapping;
> +
> +    /* First and final gfns which need mapping. */
> +    unsigned long frame = linear >> PAGE_SHIFT, first = frame;
> +    unsigned long final = (linear + bytes - !!bytes) >> PAGE_SHIFT;
> +
> +    /*
> +     * mfn points to the next free slot.  All used slots have a page reference
> +     * held on them.
> +     */
> +    mfn_t *mfn = &hvmemul_ctxt->mfn[0];
> +
> +    /*
> +     * The caller has no legitimate reason for trying a zero-byte write, but
> +     * final is calculated to fail safe in release builds.
> +     *
> +     * The maximum write size depends on the number of adjacent mfns[] which
> +     * can be vmap()'d, accounting for possible misalignment within the region.
> +     * The higher level emulation callers are responsible for ensuring that
> +     * mfns[] is large enough for the requested write size.
> +     */
> +    if ( bytes == 0 ||
> +         final - first > ARRAY_SIZE(hvmemul_ctxt->mfn) - 1 )

>=, rather than the -1?
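
(For reference, the suggested form would presumably read:

    if ( bytes == 0 ||
         final - first >= ARRAY_SIZE(hvmemul_ctxt->mfn) )

which, with both sides unsigned, is equivalent to the "- 1" variant.)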

> +    {
> +        ASSERT_UNREACHABLE();
> +        goto unhandleable;
> +    }
> +
> +    do {
> +        enum hvm_translation_result res;
> +        struct page_info *page;
> +        pagefault_info_t pfinfo;
> +        p2m_type_t p2mt;
> +
> +        /* Error checking.  Confirm that the current slot is clean. */
> +        ASSERT(mfn_x(*mfn) == 0);
> +
> +        res = hvm_translate_get_page(curr, frame << PAGE_SHIFT, true, pfec,
> +                                     &pfinfo, &page, NULL, &p2mt);
> +
> +        switch ( res )
> +        {
> +        case HVMTRANS_okay:
> +            break;
> +
> +        case HVMTRANS_bad_linear_to_gfn:
> +            x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
> +            err = ERR_PTR(~(long)X86EMUL_EXCEPTION);

Still the mysterious cast to long here and below that Jan pointed out.
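
(The question appears to be whether plain

    err = ERR_PTR(~X86EMUL_EXCEPTION);

would behave identically; that hinges on ERR_PTR()'s parameter type and the
usual integer promotions, which only the patch author can confirm here.)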

> +            goto out;
> +
> +        case HVMTRANS_bad_gfn_to_mfn:
> +            err = NULL;
> +            goto out;
> +
> +        case HVMTRANS_gfn_paged_out:
> +        case HVMTRANS_gfn_shared:
> +            err = ERR_PTR(~(long)X86EMUL_RETRY);
> +            goto out;
> +
> +        default:
> +            goto unhandleable;
> +        }
> +
> +        *mfn++ = _mfn(page_to_mfn(page));
> +        frame++;

Increment still done here rather than being co-located with test below.
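
(One way to read the suggestion, purely as a sketch: drop the separate
statement and fold the increment into the loop condition, e.g.

    } while ( ++frame < final );

so that the increment and the test sit together.)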

> +
> +        if ( p2m_is_discard_write(p2mt) )
> +        {
> +            err = ERR_PTR(~(long)X86EMUL_OKAY);
> +            goto out;
> +        }
> +
> +    } while ( frame < final );
> +
> +    /* Entire access within a single frame? */
> +    if ( first == final )
> +        mapping = map_domain_page(hvmemul_ctxt->mfn[0]);
> +    /* Multiple frames? Need to vmap(). */
> +    else if ( (mapping = vmap(hvmemul_ctxt->mfn,
> +                              final - first + 1)) == NULL )
> +        goto unhandleable;
> +
> +#ifndef NDEBUG /* Poison unused mfn[]s with INVALID_MFN. */
> +    while ( mfn < hvmemul_ctxt->mfn + ARRAY_SIZE(hvmemul_ctxt->mfn) )
> +    {
> +        ASSERT(mfn_x(*mfn) == 0);
> +        *mfn++ = INVALID_MFN;
> +    }
> +#endif
> +
> +    return mapping + (linear & ~PAGE_MASK);
> +
> + unhandleable:
> +    err = ERR_PTR(~(long)X86EMUL_UNHANDLEABLE);
> +
> + out:
> +    /* Drop all held references. */
> +    while ( mfn-- > hvmemul_ctxt->mfn )
> +        put_page(mfn_to_page(mfn_x(*mfn)));
> +
> +    return err;
> +}
> +
> +static void hvmemul_unmap_linear_addr(
> +    void *mapping, unsigned long linear, unsigned int bytes,
> +    struct hvm_emulate_ctxt *hvmemul_ctxt)
> +{
> +    struct domain *currd = current->domain;
> +    unsigned long frame = linear >> PAGE_SHIFT;
> +    unsigned long final = (linear + bytes - !!bytes) >> PAGE_SHIFT;
> +    mfn_t *mfn = &hvmemul_ctxt->mfn[0];
> +
> +    ASSERT(bytes > 0);
> +
> +    if ( frame == final )
> +        unmap_domain_page(mapping);
> +    else
> +        vunmap(mapping);
> +
> +    do
> +    {
> +        ASSERT(mfn_valid(*mfn));
> +        paging_mark_dirty(currd, *mfn);
> +        put_page(mfn_to_page(mfn_x(*mfn)));
> +
> +        frame++;

Again, increment should be co-located with test IMO.

  Paul

> +        *mfn++ = _mfn(0); /* Clean slot for map()'s error checking. */
> +
> +    } while ( frame < final );
> +
> +
> +#ifndef NDEBUG /* Check (and clean) all unused mfns. */
> +    while ( mfn < hvmemul_ctxt->mfn + ARRAY_SIZE(hvmemul_ctxt->mfn) )
> +    {
> +        ASSERT(mfn_eq(*mfn, INVALID_MFN));
> +        *mfn++ = _mfn(0);
> +    }
> +#endif
> +}
> +
> +/*
>   * Convert addr from linear to physical form, valid over the range
>   * [addr, addr + *reps * bytes_per_rep]. *reps is adjusted according to
>   * the valid computed range. It is always >0 when X86EMUL_OKAY is
> returned.
> @@ -988,11 +1141,11 @@ static int hvmemul_write(
>      struct hvm_emulate_ctxt *hvmemul_ctxt =
>          container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
>      struct vcpu *curr = current;
> -    pagefault_info_t pfinfo;
>      unsigned long addr, reps = 1;
>      uint32_t pfec = PFEC_page_present | PFEC_write_access;
>      struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
>      int rc;
> +    void *mapping;
> 
>      if ( is_x86_system_segment(seg) )
>          pfec |= PFEC_implicit;
> @@ -1008,23 +1161,13 @@ static int hvmemul_write(
>           (vio->mmio_gla == (addr & PAGE_MASK)) )
>          return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
> 
> -    rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
> +    mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
> +    if ( IS_ERR(mapping) )
> +        return ~PTR_ERR(mapping);
> 
> -    switch ( rc )
> -    {
> -    case HVMTRANS_okay:
> -        break;
> -    case HVMTRANS_bad_linear_to_gfn:
> -        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
> -        return X86EMUL_EXCEPTION;
> -    case HVMTRANS_bad_gfn_to_mfn:
> -        return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
> -    case HVMTRANS_gfn_paged_out:
> -    case HVMTRANS_gfn_shared:
> -        return X86EMUL_RETRY;
> -    default:
> -        return X86EMUL_UNHANDLEABLE;
> -    }
> +    memcpy(mapping, p_data, bytes);
> +
> +    hvmemul_unmap_linear_addr(mapping, addr, bytes, hvmemul_ctxt);
> 
>      return X86EMUL_OKAY;
>  }
> diff --git a/xen/include/asm-x86/hvm/emulate.h b/xen/include/asm-x86/hvm/emulate.h
> index 8864775..d379a4a 100644
> --- a/xen/include/asm-x86/hvm/emulate.h
> +++ b/xen/include/asm-x86/hvm/emulate.h
> @@ -37,6 +37,13 @@ struct hvm_emulate_ctxt {
>      unsigned long seg_reg_accessed;
>      unsigned long seg_reg_dirty;
> 
> +    /*
> +     * MFNs behind temporary mappings in the write callback.  The length is
> +     * arbitrary, and can be increased if writes longer than PAGE_SIZE+1 are
> +     * needed.
> +     */
> +    mfn_t mfn[2];
> +
>      uint32_t intr_shadow;
> 
>      bool_t set_context;
> --
> 2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v3 1/3] x86/hvm: Rename enum hvm_copy_result to hvm_translation_result
  2017-09-19 14:14 ` [PATCH v3 1/3] x86/hvm: Rename enum hvm_copy_result to hvm_translation_result Alexandru Isaila
  2017-09-19 14:16   ` Paul Durrant
@ 2017-09-19 14:39   ` Jan Beulich
  2017-09-19 14:45     ` Jan Beulich
  1 sibling, 1 reply; 12+ messages in thread
From: Jan Beulich @ 2017-09-19 14:39 UTC (permalink / raw)
  To: Alexandru Isaila
  Cc: tim, kevin.tian, sstabellini, wei.liu2, jun.nakajima,
	george.dunlap, andrew.cooper3, ian.jackson, xen-devel,
	paul.durrant, suravee.suthikulpanit, boris.ostrovsky

>>> On 19.09.17 at 16:14, <aisaila@bitdefender.com> wrote:
> From: Andrew Cooper <andrew.cooper3@citrix.com>
> 
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
> 
> ---
> Acked-by: Jan Beulich <jbeulich@suse.com>
> Reviewed-by: Kevin Tian <kevin.tian@intel.com>
> Acked-by: George Dunlap <george.dunlap@citrix.com>

Please avoid such misplaced tags in the future - the committer will
need to remember to remove the first --- separator in order for them
to not get lost while committing.
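
(For reference, the conventional layout keeps all tags above the first "---"
separator, roughly:

    Signed-off-by: ...
    Acked-by: ...
    Reviewed-by: ...
    ---
    Change log / diffstat

since anything below that separator is dropped when the patch is applied.)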

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v3 1/3] x86/hvm: Rename enum hvm_copy_result to hvm_translation_result
  2017-09-19 14:39   ` Jan Beulich
@ 2017-09-19 14:45     ` Jan Beulich
  0 siblings, 0 replies; 12+ messages in thread
From: Jan Beulich @ 2017-09-19 14:45 UTC (permalink / raw)
  To: Alexandru Isaila
  Cc: tim, kevin.tian, sstabellini, wei.liu2, jun.nakajima,
	george.dunlap, andrew.cooper3, ian.jackson, xen-devel,
	paul.durrant, suravee.suthikulpanit, boris.ostrovsky

>>> On 19.09.17 at 16:39, <JBeulich@suse.com> wrote:
>>>> On 19.09.17 at 16:14, <aisaila@bitdefender.com> wrote:
>> From: Andrew Cooper <andrew.cooper3@citrix.com>
>> 
>> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
>> 
>> ---
>> Acked-by: Jan Beulich <jbeulich@suse.com>
>> Reviewed-by: Kevin Tian <kevin.tian@intel.com>
>> Acked-by: George Dunlap <george.dunlap@citrix.com>
> 
> Please avoid such misplaced tags in the future - the committer will
> need to remember to remove the first --- separator in order for them
> to not get lost while committing.

Additionally it looks like you've lost Tim's ack for the shadow parts.

Jan


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH v3 3/3] x86/hvm: Implement hvmemul_write() using real mappings
  2017-09-19 14:39   ` Paul Durrant
@ 2017-09-19 14:52     ` Jan Beulich
  0 siblings, 0 replies; 12+ messages in thread
From: Jan Beulich @ 2017-09-19 14:52 UTC (permalink / raw)
  To: 'Alexandru Isaila', Paul Durrant
  Cc: Kevin Tian, sstabellini, Wei Liu, suravee.suthikulpanit,
	Andrew Cooper, Tim (Xen.org),
	George Dunlap, xen-devel, jun.nakajima, Ian Jackson,
	boris.ostrovsky

>>> On 19.09.17 at 16:39, <Paul.Durrant@citrix.com> wrote:
>> +static void *hvmemul_map_linear_addr(
>> +    unsigned long linear, unsigned int bytes, uint32_t pfec,
>> +    struct hvm_emulate_ctxt *hvmemul_ctxt)
>> +{
>> +    struct vcpu *curr = current;
>> +    void *err, *mapping;
>> +
>> +    /* First and final gfns which need mapping. */
>> +    unsigned long frame = linear >> PAGE_SHIFT, first = frame;
>> +    unsigned long final = (linear + bytes - !!bytes) >> PAGE_SHIFT;
>> +
>> +    /*
>> +     * mfn points to the next free slot.  All used slots have a page reference
>> +     * held on them.
>> +     */
>> +    mfn_t *mfn = &hvmemul_ctxt->mfn[0];
>> +
>> +    /*
>> +     * The caller has no legitimate reason for trying a zero-byte write, but
>> +     * final is calculated to fail safe in release builds.
>> +     *
>> +     * The maximum write size depends on the number of adjacent mfns[] which
>> +     * can be vmap()'d, accounting for possible misalignment within the region.
>> +     * The higher level emulation callers are responsible for ensuring that
>> +     * mfns[] is large enough for the requested write size.
>> +     */
>> +    if ( bytes == 0 ||
>> +         final - first > ARRAY_SIZE(hvmemul_ctxt->mfn) - 1 )
> 
>>=, rather than the -1?

Yeah, I had pointed out that one too earlier on. Andrew gave a
reason that didn't really convince me, but which also made me
go silent despite

>> +    {
>> +        ASSERT_UNREACHABLE();
>> +        goto unhandleable;
>> +    }
>> +
>> +    do {
>> +        enum hvm_translation_result res;
>> +        struct page_info *page;
>> +        pagefault_info_t pfinfo;
>> +        p2m_type_t p2mt;
>> +
>> +        /* Error checking.  Confirm that the current slot is clean. */
>> +        ASSERT(mfn_x(*mfn) == 0);
>> +
>> +        res = hvm_translate_get_page(curr, frame << PAGE_SHIFT, true, pfec,
>> +                                     &pfinfo, &page, NULL, &p2mt);
>> +
>> +        switch ( res )
>> +        {
>> +        case HVMTRANS_okay:
>> +            break;
>> +
>> +        case HVMTRANS_bad_linear_to_gfn:
>> +            x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
>> +            err = ERR_PTR(~(long)X86EMUL_EXCEPTION);
> 
> Still the mysterious cast to long here and below that Jan pointed out.

Oh, I've even managed to overlook that. Without an explanation
why this is needed I would withdraw my R-b.

>> +            goto out;
>> +
>> +        case HVMTRANS_bad_gfn_to_mfn:
>> +            err = NULL;
>> +            goto out;
>> +
>> +        case HVMTRANS_gfn_paged_out:
>> +        case HVMTRANS_gfn_shared:
>> +            err = ERR_PTR(~(long)X86EMUL_RETRY);
>> +            goto out;
>> +
>> +        default:
>> +            goto unhandleable;
>> +        }
>> +
>> +        *mfn++ = _mfn(page_to_mfn(page));
>> +        frame++;
> 
> Increment still done here rather than being co-located with test below.

Indeed - if you dislike it going into the while(), at least put it right
ahead of it. Yet then again it sitting next to the mfn increment
doesn't look that bad either, and the mfn increment clearly can't
be moved down.

>> +static void hvmemul_unmap_linear_addr(
>> +    void *mapping, unsigned long linear, unsigned int bytes,
>> +    struct hvm_emulate_ctxt *hvmemul_ctxt)
>> +{
>> +    struct domain *currd = current->domain;
>> +    unsigned long frame = linear >> PAGE_SHIFT;
>> +    unsigned long final = (linear + bytes - !!bytes) >> PAGE_SHIFT;
>> +    mfn_t *mfn = &hvmemul_ctxt->mfn[0];
>> +
>> +    ASSERT(bytes > 0);
>> +
>> +    if ( frame == final )
>> +        unmap_domain_page(mapping);
>> +    else
>> +        vunmap(mapping);
>> +
>> +    do
>> +    {
>> +        ASSERT(mfn_valid(*mfn));
>> +        paging_mark_dirty(currd, *mfn);
>> +        put_page(mfn_to_page(mfn_x(*mfn)));
>> +
>> +        frame++;
> 
> Again, increment should be co-located with test IMO.
> 
>   Paul
> 
>> +        *mfn++ = _mfn(0); /* Clean slot for map()'s error checking. */
>> +
>> +    } while ( frame < final );

Well, here they're at least only spaced apart by another related
operation. It certainly would be nice if the two lines were at least
swapped.
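
(Swapped, the tail of the loop would look roughly like:

        *mfn++ = _mfn(0); /* Clean slot for map()'s error checking. */
        frame++;

    } while ( frame < final );

keeping the increment directly next to the test.)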

Jan

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2017-09-19 14:52 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-09-19 14:14 [PATCH v3 0/3] Various XSA followups Alexandru Isaila
2017-09-19 14:14 ` [PATCH v3 1/3] x86/hvm: Rename enum hvm_copy_result to hvm_translation_result Alexandru Isaila
2017-09-19 14:16   ` Paul Durrant
2017-09-19 14:39   ` Jan Beulich
2017-09-19 14:45     ` Jan Beulich
2017-09-19 14:14 ` [PATCH v3 2/3] x86/hvm: Break out __hvm_copy()'s translation logic Alexandru Isaila
2017-09-19 14:27   ` Paul Durrant
2017-09-19 14:35     ` Jan Beulich
2017-09-19 14:14 ` [PATCH v3 3/3] x86/hvm: Implement hvmemul_write() using real mappings Alexandru Isaila
2017-09-19 14:38   ` Jan Beulich
2017-09-19 14:39   ` Paul Durrant
2017-09-19 14:52     ` Jan Beulich
