* [PATCH 0/2] x86/HVM: emulation adjustments
@ 2018-08-30 11:05 Jan Beulich
  2018-08-30 11:09 ` [PATCH 1/2] x86/HVM: drop hvm_fetch_from_guest_linear() Jan Beulich
                   ` (3 more replies)
  0 siblings, 4 replies; 28+ messages in thread
From: Jan Beulich @ 2018-08-30 11:05 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Olaf Hering, Paul Durrant

1: drop hvm_fetch_from_guest_linear()
2: split page straddling emulated accesses in more cases

Patch 2 is incomplete at this time, and hence only RFC.

Jan




* [PATCH 1/2] x86/HVM: drop hvm_fetch_from_guest_linear()
  2018-08-30 11:05 [PATCH 0/2] x86/HVM: emulation adjustments Jan Beulich
@ 2018-08-30 11:09 ` Jan Beulich
  2018-08-30 11:18   ` Andrew Cooper
  2018-08-30 11:24   ` Andrew Cooper
  2018-08-30 11:09 ` [PATCH RFC 2/2] x86/HVM: split page straddling emulated accesses in more cases Jan Beulich
                   ` (2 subsequent siblings)
  3 siblings, 2 replies; 28+ messages in thread
From: Jan Beulich @ 2018-08-30 11:09 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Olaf Hering, Paul Durrant, Wei Liu, Tim Deegan

It can easily be expressed through hvm_copy_from_guest_linear(), and in
two cases this even simplifies callers.

Suggested-by: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: New.
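
For illustration, the transformation (a sketch, not a compilable fragment by
itself): the dropped helper simply ORed PFEC_insn_fetch into the walk flags
before handing off to __hvm_copy(), so every former fetch call has a direct
copy equivalent, using the parameter names from the declaration being removed:

    /* before */
    rc = hvm_fetch_from_guest_linear(buf, addr, size, pfec, pfinfo);
    /* after - the fetch semantics now come solely from the extra PFEC bit */
    rc = hvm_copy_from_guest_linear(buf, addr, size, pfec | PFEC_insn_fetch, pfinfo);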

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1060,6 +1060,8 @@ static int __hvmemul_read(
         pfec |= PFEC_implicit;
     else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
         pfec |= PFEC_user_mode;
+    if ( access_type == hvm_access_insn_fetch )
+        pfec |= PFEC_insn_fetch;
 
     rc = hvmemul_virtual_to_linear(
         seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
@@ -1071,9 +1073,7 @@ static int __hvmemul_read(
          (vio->mmio_gla == (addr & PAGE_MASK)) )
         return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
 
-    rc = ((access_type == hvm_access_insn_fetch) ?
-          hvm_fetch_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo) :
-          hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo));
+    rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
 
     switch ( rc )
     {
@@ -2512,9 +2512,10 @@ void hvm_emulate_init_per_insn(
                                         hvm_access_insn_fetch,
                                         &hvmemul_ctxt->seg_reg[x86_seg_cs],
                                         &addr) &&
-             hvm_fetch_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
-                                         sizeof(hvmemul_ctxt->insn_buf),
-                                         pfec, NULL) == HVMTRANS_okay) ?
+             hvm_copy_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
+                                        sizeof(hvmemul_ctxt->insn_buf),
+                                        pfec | PFEC_insn_fetch, NULL,
+                                        NULL) == HVMTRANS_okay) ?
             sizeof(hvmemul_ctxt->insn_buf) : 0;
     }
     else
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3296,15 +3296,6 @@ enum hvm_translation_result hvm_copy_fro
                       PFEC_page_present | pfec, pfinfo);
 }
 
-enum hvm_translation_result hvm_fetch_from_guest_linear(
-    void *buf, unsigned long addr, int size, uint32_t pfec,
-    pagefault_info_t *pfinfo)
-{
-    return __hvm_copy(buf, addr, size, current,
-                      HVMCOPY_from_guest | HVMCOPY_linear,
-                      PFEC_page_present | PFEC_insn_fetch | pfec, pfinfo);
-}
-
 unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
 {
     int rc;
@@ -3758,8 +3749,9 @@ void hvm_ud_intercept(struct cpu_user_re
         if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
                                         sizeof(sig), hvm_access_insn_fetch,
                                         cs, &addr) &&
-             (hvm_fetch_from_guest_linear(sig, addr, sizeof(sig),
-                                          walk, NULL) == HVMTRANS_okay) &&
+             (hvm_copy_from_guest_linear(sig, addr, sizeof(sig),
+                                         walk | PFEC_insn_fetch, NULL,
+                                         NULL) == HVMTRANS_okay) &&
              (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
         {
             regs->rip += sizeof(sig);
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -164,8 +164,9 @@ const struct x86_emulate_ops *shadow_ini
         (!hvm_translate_virtual_addr(
             x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
             hvm_access_insn_fetch, sh_ctxt, &addr) &&
-         !hvm_fetch_from_guest_linear(
-             sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
+         !hvm_copy_from_guest_linear(
+             sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf),
+             PFEC_insn_fetch, NULL, NULL))
         ? sizeof(sh_ctxt->insn_buf) : 0;
 
     return &hvm_shadow_emulator_ops;
@@ -198,8 +199,9 @@ void shadow_continue_emulation(struct sh
             (!hvm_translate_virtual_addr(
                 x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
                 hvm_access_insn_fetch, sh_ctxt, &addr) &&
-             !hvm_fetch_from_guest_linear(
-                 sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
+             !hvm_copy_from_guest_linear(
+                 sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf),
+                 PFEC_insn_fetch, NULL, NULL))
             ? sizeof(sh_ctxt->insn_buf) : 0;
         sh_ctxt->insn_buf_eip = regs->rip;
     }
--- a/xen/arch/x86/mm/shadow/hvm.c
+++ b/xen/arch/x86/mm/shadow/hvm.c
@@ -122,10 +122,10 @@ hvm_read(enum x86_segment seg,
     if ( rc || !bytes )
         return rc;
 
-    if ( access_type == hvm_access_insn_fetch )
-        rc = hvm_fetch_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
-    else
-        rc = hvm_copy_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
+    rc = hvm_copy_from_guest_linear(p_data, addr, bytes,
+                                    (access_type == hvm_access_insn_fetch
+                                     ? PFEC_insn_fetch : 0),
+                                    &pfinfo);
 
     switch ( rc )
     {
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -100,9 +100,6 @@ enum hvm_translation_result hvm_copy_to_
 enum hvm_translation_result hvm_copy_from_guest_linear(
     void *buf, unsigned long addr, int size, uint32_t pfec,
     pagefault_info_t *pfinfo);
-enum hvm_translation_result hvm_fetch_from_guest_linear(
-    void *buf, unsigned long addr, int size, uint32_t pfec,
-    pagefault_info_t *pfinfo);
 
 /*
  * Get a reference on the page under an HVM physical or linear address.  If





* [PATCH RFC 2/2] x86/HVM: split page straddling emulated accesses in more cases
  2018-08-30 11:05 [PATCH 0/2] x86/HVM: emulation adjustments Jan Beulich
  2018-08-30 11:09 ` [PATCH 1/2] x86/HVM: drop hvm_fetch_from_guest_linear() Jan Beulich
@ 2018-08-30 11:09 ` Jan Beulich
  2018-09-03  9:11   ` Paul Durrant
  2018-09-03 15:41 ` [PATCH v2 0/2] x86/HVM: emulation adjustments Jan Beulich
  2018-09-06 12:58 ` [PATCH v3 0/3] x86/HVM: emulation adjustments Jan Beulich
  3 siblings, 1 reply; 28+ messages in thread
From: Jan Beulich @ 2018-08-30 11:09 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Olaf Hering, Paul Durrant, Wei Liu

Assuming consecutive linear addresses map to all RAM or all MMIO is not
correct. Nor is assuming that a page straddling MMIO access will access
the same emulating component for both parts of the access. If a guest
RAM read fails with HVMTRANS_bad_gfn_to_mfn and if the access straddles
a page boundary, issue accesses separately for both parts.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
RFC: This clearly wants mirroring to the write path, and perhaps also
     to the fallback code on the RMW path. But I'd like to get a sense
     first on how welcome the general approach is.
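
For a standalone illustration of the split arithmetic used by linear_read()
below (plain C, not hypervisor code; addr and bytes are made-up example
values, and 4KiB pages are assumed):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long addr = 0x10ff8;  /* starts 8 bytes before a page boundary */
        unsigned int bytes = 16;       /* ... and hence straddles into the next page */
        unsigned int offset = addr & ~PAGE_MASK;

        if ( offset + bytes <= PAGE_SIZE )
            printf("single access: %#lx, %u bytes\n", addr, bytes);
        else
        {
            unsigned int part1 = PAGE_SIZE - offset;

            /* Each part can now be routed to RAM or MMIO independently. */
            printf("part 1: %#lx, %u bytes\n", addr, part1);
            printf("part 2: %#lx, %u bytes\n", addr + part1, bytes - part1);
        }

        return 0;
    }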

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1041,6 +1041,48 @@ static inline int hvmemul_linear_mmio_wr
                                       pfec, hvmemul_ctxt, translate);
 }
 
+static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
+                       uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    pagefault_info_t pfinfo;
+    int rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
+
+    switch ( rc )
+    {
+        unsigned int offset, part1;
+
+    case HVMTRANS_okay:
+        return X86EMUL_OKAY;
+
+    case HVMTRANS_bad_linear_to_gfn:
+        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
+        return X86EMUL_EXCEPTION;
+
+    case HVMTRANS_bad_gfn_to_mfn:
+        if ( pfec & PFEC_insn_fetch )
+            return X86EMUL_UNHANDLEABLE;
+
+        offset = addr & ~PAGE_MASK;
+        if ( offset + bytes <= PAGE_SIZE )
+            return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
+                                            hvmemul_ctxt, 0);
+
+        /* Split the access at the page boundary. */
+        part1 = PAGE_SIZE - offset;
+        rc = linear_read(addr, part1, p_data, pfec, hvmemul_ctxt);
+        if ( rc == X86EMUL_OKAY )
+            rc = linear_read(addr + part1, bytes - part1, p_data + part1,
+                             pfec, hvmemul_ctxt);
+        return rc;
+
+    case HVMTRANS_gfn_paged_out:
+    case HVMTRANS_gfn_shared:
+        return X86EMUL_RETRY;
+    }
+
+    return X86EMUL_UNHANDLEABLE;
+}
+
 static int __hvmemul_read(
     enum x86_segment seg,
     unsigned long offset,
@@ -1049,11 +1091,9 @@ static int __hvmemul_read(
     enum hvm_access_type access_type,
     struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
-    struct vcpu *curr = current;
-    pagefault_info_t pfinfo;
     unsigned long addr, reps = 1;
     uint32_t pfec = PFEC_page_present;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
+    struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
     int rc;
 
     if ( is_x86_system_segment(seg) )
@@ -1073,28 +1113,7 @@ static int __hvmemul_read(
          (vio->mmio_gla == (addr & PAGE_MASK)) )
         return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
 
-    rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
-
-    switch ( rc )
-    {
-    case HVMTRANS_okay:
-        break;
-    case HVMTRANS_bad_linear_to_gfn:
-        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
-        return X86EMUL_EXCEPTION;
-    case HVMTRANS_bad_gfn_to_mfn:
-        if ( access_type == hvm_access_insn_fetch )
-            return X86EMUL_UNHANDLEABLE;
-
-        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
-    case HVMTRANS_gfn_paged_out:
-    case HVMTRANS_gfn_shared:
-        return X86EMUL_RETRY;
-    default:
-        return X86EMUL_UNHANDLEABLE;
-    }
-
-    return X86EMUL_OKAY;
+    return linear_read(addr, bytes, p_data, pfec, hvmemul_ctxt);
 }
 
 static int hvmemul_read(






* Re: [PATCH 1/2] x86/HVM: drop hvm_fetch_from_guest_linear()
  2018-08-30 11:09 ` [PATCH 1/2] x86/HVM: drop hvm_fetch_from_guest_linear() Jan Beulich
@ 2018-08-30 11:18   ` Andrew Cooper
  2018-08-30 12:02     ` Jan Beulich
  2018-08-30 11:24   ` Andrew Cooper
  1 sibling, 1 reply; 28+ messages in thread
From: Andrew Cooper @ 2018-08-30 11:18 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Tim Deegan, Olaf Hering, Paul Durrant, Wei Liu

On 30/08/18 12:09, Jan Beulich wrote:
> It can easily be expressed through hvm_copy_from_guest_linear(), and in
> two cases this even simplifies callers.
>
> Suggested-by: Paul Durrant <paul.durrant@citrix.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

I really like this piece of cleanup, but...

> ---
> v2: New.
>
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -1060,6 +1060,8 @@ static int __hvmemul_read(
>          pfec |= PFEC_implicit;
>      else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
>          pfec |= PFEC_user_mode;
> +    if ( access_type == hvm_access_insn_fetch )
> +        pfec |= PFEC_insn_fetch;
>  
>      rc = hvmemul_virtual_to_linear(
>          seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
> @@ -1071,9 +1073,7 @@ static int __hvmemul_read(
>           (vio->mmio_gla == (addr & PAGE_MASK)) )
>          return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
>  
> -    rc = ((access_type == hvm_access_insn_fetch) ?
> -          hvm_fetch_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo) :
> -          hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo));
> +    rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
>  
>      switch ( rc )
>      {
> @@ -2512,9 +2512,10 @@ void hvm_emulate_init_per_insn(
>                                          hvm_access_insn_fetch,
>                                          &hvmemul_ctxt->seg_reg[x86_seg_cs],
>                                          &addr) &&
> -             hvm_fetch_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
> -                                         sizeof(hvmemul_ctxt->insn_buf),
> -                                         pfec, NULL) == HVMTRANS_okay) ?
> +             hvm_copy_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
> +                                        sizeof(hvmemul_ctxt->insn_buf),
> +                                        pfec | PFEC_insn_fetch, NULL,
> +                                        NULL) == HVMTRANS_okay) ?

Does this even compile?  You seem to have an extra NULL here and several
later places.

~Andrew


* Re: [PATCH 1/2] x86/HVM: drop hvm_fetch_from_guest_linear()
  2018-08-30 11:09 ` [PATCH 1/2] x86/HVM: drop hvm_fetch_from_guest_linear() Jan Beulich
  2018-08-30 11:18   ` Andrew Cooper
@ 2018-08-30 11:24   ` Andrew Cooper
  1 sibling, 0 replies; 28+ messages in thread
From: Andrew Cooper @ 2018-08-30 11:24 UTC (permalink / raw)
  To: Jan Beulich, xen-devel; +Cc: Tim Deegan, Olaf Hering, Paul Durrant, Wei Liu

On 30/08/18 12:09, Jan Beulich wrote:
> @@ -3758,8 +3749,9 @@ void hvm_ud_intercept(struct cpu_user_re
>          if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
>                                          sizeof(sig), hvm_access_insn_fetch,
>                                          cs, &addr) &&
> -             (hvm_fetch_from_guest_linear(sig, addr, sizeof(sig),
> -                                          walk, NULL) == HVMTRANS_okay) &&
> +             (hvm_copy_from_guest_linear(sig, addr, sizeof(sig),
> +                                         walk | PFEC_insn_fetch, NULL,
> +                                         NULL) == HVMTRANS_okay) &&

This would be more simple by folding PFEC_insn_fetch into the
initialisation of walk, as this whole expression is just an instruction
fetch.

~Andrew


* Re: [PATCH 1/2] x86/HVM: drop hvm_fetch_from_guest_linear()
  2018-08-30 11:18   ` Andrew Cooper
@ 2018-08-30 12:02     ` Jan Beulich
  2018-08-30 12:22       ` Andrew Cooper
  0 siblings, 1 reply; 28+ messages in thread
From: Jan Beulich @ 2018-08-30 12:02 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: xen-devel, Olaf Hering, Paul Durrant, Wei Liu, Tim Deegan

>>> On 30.08.18 at 13:18, <andrew.cooper3@citrix.com> wrote:
> On 30/08/18 12:09, Jan Beulich wrote:
>> @@ -2512,9 +2512,10 @@ void hvm_emulate_init_per_insn(
>>                                          hvm_access_insn_fetch,
>>                                          &hvmemul_ctxt->seg_reg[x86_seg_cs],
>>                                          &addr) &&
>> -             hvm_fetch_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
>> -                                         sizeof(hvmemul_ctxt->insn_buf),
>> -                                         pfec, NULL) == HVMTRANS_okay) ?
>> +             hvm_copy_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
>> +                                        sizeof(hvmemul_ctxt->insn_buf),
>> +                                        pfec | PFEC_insn_fetch, NULL,
>> +                                        NULL) == HVMTRANS_okay) ?
> 
> Does this even compile?  You seem to have an extra NULL here and several
> later places.

It does - with "x86/HVM: implement memory read caching" also
applied. IOW - I'm sorry, insufficient re-ordering work done
when moving these two ahead.

Jan




* Re: [PATCH 1/2] x86/HVM: drop hvm_fetch_from_guest_linear()
  2018-08-30 12:02     ` Jan Beulich
@ 2018-08-30 12:22       ` Andrew Cooper
  2018-08-30 12:28         ` Jan Beulich
  0 siblings, 1 reply; 28+ messages in thread
From: Andrew Cooper @ 2018-08-30 12:22 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Olaf Hering, Paul Durrant, Wei Liu, Tim Deegan

On 30/08/18 13:02, Jan Beulich wrote:
>>>> On 30.08.18 at 13:18, <andrew.cooper3@citrix.com> wrote:
>> On 30/08/18 12:09, Jan Beulich wrote:
>>> @@ -2512,9 +2512,10 @@ void hvm_emulate_init_per_insn(
>>>                                          hvm_access_insn_fetch,
>>>                                          &hvmemul_ctxt->seg_reg[x86_seg_cs],
>>>                                          &addr) &&
>>> -             hvm_fetch_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
>>> -                                         sizeof(hvmemul_ctxt->insn_buf),
>>> -                                         pfec, NULL) == HVMTRANS_okay) ?
>>> +             hvm_copy_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
>>> +                                        sizeof(hvmemul_ctxt->insn_buf),
>>> +                                        pfec | PFEC_insn_fetch, NULL,
>>> +                                        NULL) == HVMTRANS_okay) ?
>> Does this even compile?  You seem to have an extra NULL here and several
>> later places.
> It does - with "x86/HVM: implement memory read caching" also
> applied. IOW - I'm sorry, insufficient re-ordering work done
> when moving these two ahead.

Does it?  This patch has a mix of callers with 4 and 5 parameters, which
is why I noticed it in the first place.

With it fixed up to compile, and preferably with the other adjustment
included, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>


* Re: [PATCH 1/2] x86/HVM: drop hvm_fetch_from_guest_linear()
  2018-08-30 12:22       ` Andrew Cooper
@ 2018-08-30 12:28         ` Jan Beulich
  0 siblings, 0 replies; 28+ messages in thread
From: Jan Beulich @ 2018-08-30 12:28 UTC (permalink / raw)
  To: Andrew Cooper; +Cc: xen-devel, Olaf Hering, Paul Durrant, Wei Liu, Tim Deegan

>>> On 30.08.18 at 14:22, <andrew.cooper3@citrix.com> wrote:
> On 30/08/18 13:02, Jan Beulich wrote:
>>>>> On 30.08.18 at 13:18, <andrew.cooper3@citrix.com> wrote:
>>> On 30/08/18 12:09, Jan Beulich wrote:
>>>> @@ -2512,9 +2512,10 @@ void hvm_emulate_init_per_insn(
>>>>                                          hvm_access_insn_fetch,
>>>>                                          &hvmemul_ctxt->seg_reg[x86_seg_cs],
>>>>                                          &addr) &&
>>>> -             hvm_fetch_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
>>>> -                                         sizeof(hvmemul_ctxt->insn_buf),
>>>> -                                         pfec, NULL) == HVMTRANS_okay) ?
>>>> +             hvm_copy_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
>>>> +                                        sizeof(hvmemul_ctxt->insn_buf),
>>>> +                                        pfec | PFEC_insn_fetch, NULL,
>>>> +                                        NULL) == HVMTRANS_okay) ?
>>> Does this even compile?  You seem to have an extra NULL here and several
>>> later places.
>> It does - with "x86/HVM: implement memory read caching" also
>> applied. IOW - I'm sorry, insufficient re-ordering work done
>> when moving these two ahead.
> 
> Does it?  This patch has a mix of callers with 4 and 5 parameters, which
> is why I noticed it in the first place.

Right, hence the need for that other (re-based) patch on top. I believe
I've now moved things suitably between both patches.

> With it fixed up to compile, and preferably with the other adjustment
> included, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

Thanks (and yes, I've followed the other suggestion too),
Jan




* Re: [PATCH RFC 2/2] x86/HVM: split page straddling emulated accesses in more cases
  2018-08-30 11:09 ` [PATCH RFC 2/2] x86/HVM: split page straddling emulated accesses in more cases Jan Beulich
@ 2018-09-03  9:11   ` Paul Durrant
  0 siblings, 0 replies; 28+ messages in thread
From: Paul Durrant @ 2018-09-03  9:11 UTC (permalink / raw)
  To: 'Jan Beulich', xen-devel; +Cc: Andrew Cooper, Olaf Hering, Wei Liu

> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 30 August 2018 12:10
> To: xen-devel <xen-devel@lists.xenproject.org>
> Cc: Olaf Hering <olaf@aepfle.de>; Andrew Cooper
> <Andrew.Cooper3@citrix.com>; Paul Durrant <Paul.Durrant@citrix.com>;
> Wei Liu <wei.liu2@citrix.com>
> Subject: [PATCH RFC 2/2] x86/HVM: split page straddling emulated accesses
> in more cases
> 
> Assuming consecutive linear addresses map to all RAM or all MMIO is not
> correct. Nor is assuming that a page straddling MMIO access will access
> the same emulating component for both parts of the access. If a guest
> RAM read fails with HVMTRANS_bad_gfn_to_mfn and if the access straddles
> a page boundary, issue accesses separately for both parts.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> RFC: This clearly wants mirroring to the write path, and perhaps also
>      to the fallback code on the RMW path. But I'd like to get a sense
>      first on how welcome the general approach is.

At first glance it appears that there would be a problem when the second linear_read() after a split returns X86EMUL_RETRY, causing the first one to be re-called when the emulation is re-done. Because the underlying cache handles the re-call, this is not actually a problem, so I think the approach is ok and should also be fine on the write path.

  Paul

> 
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -1041,6 +1041,48 @@ static inline int hvmemul_linear_mmio_wr
>                                        pfec, hvmemul_ctxt, translate);
>  }
> 
> +static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
> +                       uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
> +{
> +    pagefault_info_t pfinfo;
> +    int rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec,
> &pfinfo);
> +
> +    switch ( rc )
> +    {
> +        unsigned int offset, part1;
> +
> +    case HVMTRANS_okay:
> +        return X86EMUL_OKAY;
> +
> +    case HVMTRANS_bad_linear_to_gfn:
> +        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
> +        return X86EMUL_EXCEPTION;
> +
> +    case HVMTRANS_bad_gfn_to_mfn:
> +        if ( pfec & PFEC_insn_fetch )
> +            return X86EMUL_UNHANDLEABLE;
> +
> +        offset = addr & ~PAGE_MASK;
> +        if ( offset + bytes <= PAGE_SIZE )
> +            return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
> +                                            hvmemul_ctxt, 0);
> +
> +        /* Split the access at the page boundary. */
> +        part1 = PAGE_SIZE - offset;
> +        rc = linear_read(addr, part1, p_data, pfec, hvmemul_ctxt);
> +        if ( rc == X86EMUL_OKAY )
> +            rc = linear_read(addr + part1, bytes - part1, p_data + part1,
> +                             pfec, hvmemul_ctxt);
> +        return rc;
> +
> +    case HVMTRANS_gfn_paged_out:
> +    case HVMTRANS_gfn_shared:
> +        return X86EMUL_RETRY;
> +    }
> +
> +    return X86EMUL_UNHANDLEABLE;
> +}
> +
>  static int __hvmemul_read(
>      enum x86_segment seg,
>      unsigned long offset,
> @@ -1049,11 +1091,9 @@ static int __hvmemul_read(
>      enum hvm_access_type access_type,
>      struct hvm_emulate_ctxt *hvmemul_ctxt)
>  {
> -    struct vcpu *curr = current;
> -    pagefault_info_t pfinfo;
>      unsigned long addr, reps = 1;
>      uint32_t pfec = PFEC_page_present;
> -    struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
> +    struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
>      int rc;
> 
>      if ( is_x86_system_segment(seg) )
> @@ -1073,28 +1113,7 @@ static int __hvmemul_read(
>           (vio->mmio_gla == (addr & PAGE_MASK)) )
>          return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
> hvmemul_ctxt, 1);
> 
> -    rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
> -
> -    switch ( rc )
> -    {
> -    case HVMTRANS_okay:
> -        break;
> -    case HVMTRANS_bad_linear_to_gfn:
> -        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
> -        return X86EMUL_EXCEPTION;
> -    case HVMTRANS_bad_gfn_to_mfn:
> -        if ( access_type == hvm_access_insn_fetch )
> -            return X86EMUL_UNHANDLEABLE;
> -
> -        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
> hvmemul_ctxt, 0);
> -    case HVMTRANS_gfn_paged_out:
> -    case HVMTRANS_gfn_shared:
> -        return X86EMUL_RETRY;
> -    default:
> -        return X86EMUL_UNHANDLEABLE;
> -    }
> -
> -    return X86EMUL_OKAY;
> +    return linear_read(addr, bytes, p_data, pfec, hvmemul_ctxt);
>  }
> 
>  static int hvmemul_read(
> 
> 
> 



* [PATCH v2 0/2] x86/HVM: emulation adjustments
  2018-08-30 11:05 [PATCH 0/2] x86/HVM: emulation adjustments Jan Beulich
  2018-08-30 11:09 ` [PATCH 1/2] x86/HVM: drop hvm_fetch_from_guest_linear() Jan Beulich
  2018-08-30 11:09 ` [PATCH RFC 2/2] x86/HVM: split page straddling emulated accesses in more cases Jan Beulich
@ 2018-09-03 15:41 ` Jan Beulich
  2018-09-03 15:43   ` [PATCH v2 1/2] x86/HVM: drop hvm_fetch_from_guest_linear() Jan Beulich
  2018-09-03 15:44   ` [PATCH v2 2/2] x86/HVM: split page straddling emulated accesses in more cases Jan Beulich
  2018-09-06 12:58 ` [PATCH v3 0/3] x86/HVM: emulation adjustments Jan Beulich
  3 siblings, 2 replies; 28+ messages in thread
From: Jan Beulich @ 2018-09-03 15:41 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Olaf Hering, Paul Durrant

1: drop hvm_fetch_from_guest_linear()
2: split page straddling emulated accesses in more cases

v2: Patch 1 now builds cleanly without other patches in place (the up-to-date
versions of which are yet to be posted), and patch 2 is no longer RFC.

Jan




* [PATCH v2 1/2] x86/HVM: drop hvm_fetch_from_guest_linear()
  2018-09-03 15:41 ` [PATCH v2 0/2] x86/HVM: emulation adjustments Jan Beulich
@ 2018-09-03 15:43   ` Jan Beulich
  2018-09-04 10:01     ` Jan Beulich
  2018-09-05  7:20     ` Olaf Hering
  2018-09-03 15:44   ` [PATCH v2 2/2] x86/HVM: split page straddling emulated accesses in more cases Jan Beulich
  1 sibling, 2 replies; 28+ messages in thread
From: Jan Beulich @ 2018-09-03 15:43 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Olaf Hering, Paul Durrant

It can easily be expressed through hvm_copy_from_guest_linear(), and in
two cases this even simplifies callers.

Suggested-by: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
v2: Make sure this compiles standalone. Slightly adjust change in
    hvm_ud_intercept().

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1060,6 +1060,8 @@ static int __hvmemul_read(
         pfec |= PFEC_implicit;
     else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
         pfec |= PFEC_user_mode;
+    if ( access_type == hvm_access_insn_fetch )
+        pfec |= PFEC_insn_fetch;
 
     rc = hvmemul_virtual_to_linear(
         seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
@@ -1071,9 +1073,7 @@ static int __hvmemul_read(
          (vio->mmio_gla == (addr & PAGE_MASK)) )
         return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
 
-    rc = ((access_type == hvm_access_insn_fetch) ?
-          hvm_fetch_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo) :
-          hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo));
+    rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
 
     switch ( rc )
     {
@@ -2512,9 +2512,10 @@ void hvm_emulate_init_per_insn(
                                         hvm_access_insn_fetch,
                                         &hvmemul_ctxt->seg_reg[x86_seg_cs],
                                         &addr) &&
-             hvm_fetch_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
-                                         sizeof(hvmemul_ctxt->insn_buf),
-                                         pfec, NULL) == HVMTRANS_okay) ?
+             hvm_copy_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
+                                        sizeof(hvmemul_ctxt->insn_buf),
+                                        pfec | PFEC_insn_fetch,
+                                        NULL) == HVMTRANS_okay) ?
             sizeof(hvmemul_ctxt->insn_buf) : 0;
     }
     else
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3287,15 +3287,6 @@ enum hvm_translation_result hvm_copy_fro
                       PFEC_page_present | pfec, pfinfo);
 }
 
-enum hvm_translation_result hvm_fetch_from_guest_linear(
-    void *buf, unsigned long addr, int size, uint32_t pfec,
-    pagefault_info_t *pfinfo)
-{
-    return __hvm_copy(buf, addr, size, current,
-                      HVMCOPY_from_guest | HVMCOPY_linear,
-                      PFEC_page_present | PFEC_insn_fetch | pfec, pfinfo);
-}
-
 unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
 {
     int rc;
@@ -3741,16 +3732,16 @@ void hvm_ud_intercept(struct cpu_user_re
     if ( opt_hvm_fep )
     {
         const struct segment_register *cs = &ctxt.seg_reg[x86_seg_cs];
-        uint32_t walk = (ctxt.seg_reg[x86_seg_ss].dpl == 3)
-            ? PFEC_user_mode : 0;
+        uint32_t walk = ((ctxt.seg_reg[x86_seg_ss].dpl == 3)
+                         ? PFEC_user_mode : 0) | PFEC_insn_fetch;
         unsigned long addr;
         char sig[5]; /* ud2; .ascii "xen" */
 
         if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
                                         sizeof(sig), hvm_access_insn_fetch,
                                         cs, &addr) &&
-             (hvm_fetch_from_guest_linear(sig, addr, sizeof(sig),
-                                          walk, NULL) == HVMTRANS_okay) &&
+             (hvm_copy_from_guest_linear(sig, addr, sizeof(sig),
+                                         walk, NULL) == HVMTRANS_okay) &&
              (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
         {
             regs->rip += sizeof(sig);
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -164,8 +164,9 @@ const struct x86_emulate_ops *shadow_ini
         (!hvm_translate_virtual_addr(
             x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
             hvm_access_insn_fetch, sh_ctxt, &addr) &&
-         !hvm_fetch_from_guest_linear(
-             sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
+         !hvm_copy_from_guest_linear(
+             sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf),
+             PFEC_insn_fetch, NULL))
         ? sizeof(sh_ctxt->insn_buf) : 0;
 
     return &hvm_shadow_emulator_ops;
@@ -198,8 +199,9 @@ void shadow_continue_emulation(struct sh
             (!hvm_translate_virtual_addr(
                 x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
                 hvm_access_insn_fetch, sh_ctxt, &addr) &&
-             !hvm_fetch_from_guest_linear(
-                 sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
+             !hvm_copy_from_guest_linear(
+                 sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf),
+                 PFEC_insn_fetch, NULL))
             ? sizeof(sh_ctxt->insn_buf) : 0;
         sh_ctxt->insn_buf_eip = regs->rip;
     }
--- a/xen/arch/x86/mm/shadow/hvm.c
+++ b/xen/arch/x86/mm/shadow/hvm.c
@@ -122,10 +122,10 @@ hvm_read(enum x86_segment seg,
     if ( rc || !bytes )
         return rc;
 
-    if ( access_type == hvm_access_insn_fetch )
-        rc = hvm_fetch_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
-    else
-        rc = hvm_copy_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
+    rc = hvm_copy_from_guest_linear(p_data, addr, bytes,
+                                    (access_type == hvm_access_insn_fetch
+                                     ? PFEC_insn_fetch : 0),
+                                    &pfinfo);
 
     switch ( rc )
     {
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -100,9 +100,6 @@ enum hvm_translation_result hvm_copy_to_
 enum hvm_translation_result hvm_copy_from_guest_linear(
     void *buf, unsigned long addr, int size, uint32_t pfec,
     pagefault_info_t *pfinfo);
-enum hvm_translation_result hvm_fetch_from_guest_linear(
-    void *buf, unsigned long addr, int size, uint32_t pfec,
-    pagefault_info_t *pfinfo);
 
 /*
  * Get a reference on the page under an HVM physical or linear address.  If





* [PATCH v2 2/2] x86/HVM: split page straddling emulated accesses in more cases
  2018-09-03 15:41 ` [PATCH v2 0/2] x86/HVM: emulation adjustments Jan Beulich
  2018-09-03 15:43   ` [PATCH v2 1/2] x86/HVM: drop hvm_fetch_from_guest_linear() Jan Beulich
@ 2018-09-03 15:44   ` Jan Beulich
  2018-09-03 16:15     ` Paul Durrant
  2018-09-05  7:22     ` Olaf Hering
  1 sibling, 2 replies; 28+ messages in thread
From: Jan Beulich @ 2018-09-03 15:44 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Olaf Hering, Paul Durrant

Assuming consecutive linear addresses map to all RAM or all MMIO is not
correct. Nor is assuming that a page straddling MMIO access will access
the same emulating component for both parts of the access. If a guest
RAM read fails with HVMTRANS_bad_gfn_to_mfn and if the access straddles
a page boundary, issue accesses separately for both parts.

The extra call to known_glfn() from hvmemul_write() is just to preserve
original behavior; we should consider dropping this (also to make sure
the intended effect of 8cbd4fb0b7 ["x86/hvm: implement hvmemul_write()
using real mappings"] is achieved in all cases where it can be achieved
without further rework), or alternatively we perhaps ought to mirror
this in hvmemul_rmw().

Note that the correctness of this depends on the MMIO caching used
elsewhere in the emulation code.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Also handle hvmemul_{write,rmw}().
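
As a simplified, self-contained model of the known_glfn() check introduced
below (stand-in types and a made-up structure layout, not the hypervisor
code): the PFEC bits of the access select which of the previously recorded
MMIO access permissions must already be set, and the recorded page-aligned
linear address must cover the whole, non-straddling access:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SIZE         4096UL
    #define PAGE_MASK         (~(PAGE_SIZE - 1))
    #define PFEC_write_access (1u << 1)   /* x86 page-fault error code bits */
    #define PFEC_insn_fetch   (1u << 4)

    /* Stand-in for the relevant struct hvm_vcpu_io fields. */
    struct mmio_cache {
        bool read_ok, write_ok, fetch_ok;
        unsigned long gla;        /* page-aligned linear address last seen */
    };

    bool cache_covers(const struct mmio_cache *c, unsigned long addr,
                      unsigned int bytes, uint32_t pfec)
    {
        bool ok = (pfec & PFEC_write_access) ? c->write_ok
                : (pfec & PFEC_insn_fetch)   ? c->fetch_ok
                                             : c->read_ok;

        return ok && c->gla == (addr & PAGE_MASK) &&
               (addr & ~PAGE_MASK) + bytes <= PAGE_SIZE;
    }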

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1041,6 +1041,110 @@ static inline int hvmemul_linear_mmio_wr
                                       pfec, hvmemul_ctxt, translate);
 }
 
+static bool known_glfn(unsigned long addr, unsigned int bytes, uint32_t pfec)
+{
+    const struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
+
+    if ( pfec & PFEC_write_access )
+    {
+        if ( !vio->mmio_access.write_access )
+            return false;
+    }
+    else if ( pfec & PFEC_insn_fetch )
+    {
+        if ( !vio->mmio_access.insn_fetch )
+            return false;
+    }
+    else if ( !vio->mmio_access.read_access )
+            return false;
+
+    return (vio->mmio_gla == (addr & PAGE_MASK) &&
+            (addr & ~PAGE_MASK) + bytes <= PAGE_SIZE);
+}
+
+static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
+                       uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    pagefault_info_t pfinfo;
+    int rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
+
+    switch ( rc )
+    {
+        unsigned int offset, part1;
+
+    case HVMTRANS_okay:
+        return X86EMUL_OKAY;
+
+    case HVMTRANS_bad_linear_to_gfn:
+        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
+        return X86EMUL_EXCEPTION;
+
+    case HVMTRANS_bad_gfn_to_mfn:
+        if ( pfec & PFEC_insn_fetch )
+            return X86EMUL_UNHANDLEABLE;
+
+        offset = addr & ~PAGE_MASK;
+        if ( offset + bytes <= PAGE_SIZE )
+            return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
+                                            hvmemul_ctxt,
+                                            known_glfn(addr, bytes, pfec));
+
+        /* Split the access at the page boundary. */
+        part1 = PAGE_SIZE - offset;
+        rc = linear_read(addr, part1, p_data, pfec, hvmemul_ctxt);
+        if ( rc == X86EMUL_OKAY )
+            rc = linear_read(addr + part1, bytes - part1, p_data + part1,
+                             pfec, hvmemul_ctxt);
+        return rc;
+
+    case HVMTRANS_gfn_paged_out:
+    case HVMTRANS_gfn_shared:
+        return X86EMUL_RETRY;
+    }
+
+    return X86EMUL_UNHANDLEABLE;
+}
+
+static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
+                        uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    pagefault_info_t pfinfo;
+    int rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
+
+    switch ( rc )
+    {
+        unsigned int offset, part1;
+
+    case HVMTRANS_okay:
+        return X86EMUL_OKAY;
+
+    case HVMTRANS_bad_linear_to_gfn:
+        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
+        return X86EMUL_EXCEPTION;
+
+    case HVMTRANS_bad_gfn_to_mfn:
+        offset = addr & ~PAGE_MASK;
+        if ( offset + bytes <= PAGE_SIZE )
+            return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
+                                             hvmemul_ctxt,
+                                             known_glfn(addr, bytes, pfec));
+
+        /* Split the access at the page boundary. */
+        part1 = PAGE_SIZE - offset;
+        rc = linear_write(addr, part1, p_data, pfec, hvmemul_ctxt);
+        if ( rc == X86EMUL_OKAY )
+            rc = linear_write(addr + part1, bytes - part1, p_data + part1,
+                              pfec, hvmemul_ctxt);
+        return rc;
+
+    case HVMTRANS_gfn_paged_out:
+    case HVMTRANS_gfn_shared:
+        return X86EMUL_RETRY;
+    }
+
+    return X86EMUL_UNHANDLEABLE;
+}
+
 static int __hvmemul_read(
     enum x86_segment seg,
     unsigned long offset,
@@ -1049,11 +1153,8 @@ static int __hvmemul_read(
     enum hvm_access_type access_type,
     struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
-    struct vcpu *curr = current;
-    pagefault_info_t pfinfo;
     unsigned long addr, reps = 1;
     uint32_t pfec = PFEC_page_present;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
     int rc;
 
     if ( is_x86_system_segment(seg) )
@@ -1067,34 +1168,8 @@ static int __hvmemul_read(
         seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
     if ( rc != X86EMUL_OKAY || !bytes )
         return rc;
-    if ( ((access_type != hvm_access_insn_fetch
-           ? vio->mmio_access.read_access
-           : vio->mmio_access.insn_fetch)) &&
-         (vio->mmio_gla == (addr & PAGE_MASK)) )
-        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
-
-    rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
-
-    switch ( rc )
-    {
-    case HVMTRANS_okay:
-        break;
-    case HVMTRANS_bad_linear_to_gfn:
-        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
-        return X86EMUL_EXCEPTION;
-    case HVMTRANS_bad_gfn_to_mfn:
-        if ( access_type == hvm_access_insn_fetch )
-            return X86EMUL_UNHANDLEABLE;
-
-        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
-    case HVMTRANS_gfn_paged_out:
-    case HVMTRANS_gfn_shared:
-        return X86EMUL_RETRY;
-    default:
-        return X86EMUL_UNHANDLEABLE;
-    }
 
-    return X86EMUL_OKAY;
+    return linear_read(addr, bytes, p_data, pfec, hvmemul_ctxt);
 }
 
 static int hvmemul_read(
@@ -1171,12 +1246,10 @@ static int hvmemul_write(
 {
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
-    struct vcpu *curr = current;
     unsigned long addr, reps = 1;
     uint32_t pfec = PFEC_page_present | PFEC_write_access;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
     int rc;
-    void *mapping;
+    void *mapping = NULL;
 
     if ( is_x86_system_segment(seg) )
         pfec |= PFEC_implicit;
@@ -1188,16 +1261,15 @@ static int hvmemul_write(
     if ( rc != X86EMUL_OKAY || !bytes )
         return rc;
 
-    if ( vio->mmio_access.write_access &&
-         (vio->mmio_gla == (addr & PAGE_MASK)) )
-        return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
-
-    mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
-    if ( IS_ERR(mapping) )
-        return ~PTR_ERR(mapping);
+    if ( !known_glfn(addr, bytes, pfec) )
+    {
+        mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
+        if ( IS_ERR(mapping) )
+             return ~PTR_ERR(mapping);
+    }
 
     if ( !mapping )
-        return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
+        return linear_write(addr, bytes, p_data, pfec, hvmemul_ctxt);
 
     memcpy(mapping, p_data, bytes);
 
@@ -1218,7 +1290,6 @@ static int hvmemul_rmw(
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     unsigned long addr, reps = 1;
     uint32_t pfec = PFEC_page_present | PFEC_write_access;
-    struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
     int rc;
     void *mapping;
 
@@ -1244,18 +1315,14 @@ static int hvmemul_rmw(
     else
     {
         unsigned long data = 0;
-        bool known_gpfn = vio->mmio_access.write_access &&
-                          vio->mmio_gla == (addr & PAGE_MASK);
 
         if ( bytes > sizeof(data) )
             return X86EMUL_UNHANDLEABLE;
-        rc = hvmemul_linear_mmio_read(addr, bytes, &data, pfec, hvmemul_ctxt,
-                                      known_gpfn);
+        rc = linear_read(addr, bytes, &data, pfec, hvmemul_ctxt);
         if ( rc == X86EMUL_OKAY )
             rc = x86_emul_rmw(&data, bytes, eflags, state, ctxt);
         if ( rc == X86EMUL_OKAY )
-            rc = hvmemul_linear_mmio_write(addr, bytes, &data, pfec,
-                                           hvmemul_ctxt, known_gpfn);
+            rc = linear_write(addr, bytes, &data, pfec, hvmemul_ctxt);
     }
 
     return rc;





* Re: [PATCH v2 2/2] x86/HVM: split page straddling emulated accesses in more cases
  2018-09-03 15:44   ` [PATCH v2 2/2] x86/HVM: split page straddling emulated accesses in more cases Jan Beulich
@ 2018-09-03 16:15     ` Paul Durrant
  2018-09-04  7:43       ` Jan Beulich
  2018-09-05  7:22     ` Olaf Hering
  1 sibling, 1 reply; 28+ messages in thread
From: Paul Durrant @ 2018-09-03 16:15 UTC (permalink / raw)
  To: 'Jan Beulich', xen-devel; +Cc: Andrew Cooper, Olaf Hering

> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 03 September 2018 16:45
> To: xen-devel <xen-devel@lists.xenproject.org>
> Cc: Olaf Hering <olaf@aepfle.de>; Andrew Cooper
> <Andrew.Cooper3@citrix.com>; Paul Durrant <Paul.Durrant@citrix.com>
> Subject: [PATCH v2 2/2] x86/HVM: split page straddling emulated accesses in
> more cases
> 
> Assuming consecutive linear addresses map to all RAM or all MMIO is not
> correct. Nor is assuming that a page straddling MMIO access will access
> the same emulating component for both parts of the access. If a guest
> RAM read fails with HVMTRANS_bad_gfn_to_mfn and if the access straddles
> a page boundary, issue accesses separately for both parts.
> 
> The extra call to known_glfn() from hvmemul_write() is just to preserve
> original behavior; we should consider dropping this (also to make sure
> the intended effect of 8cbd4fb0b7 ["x86/hvm: implement hvmemul_write()
> using real mappings"] is achieved in all cases where it can be achieved
> without further rework), or alternatively we perhaps ought to mirror
> this in hvmemul_rmw().
> 
> Note that the correctness of this depends on the MMIO caching used
> elsewhere in the emulation code.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> v2: Also handle hvmemul_{write,rmw}().
> 
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -1041,6 +1041,110 @@ static inline int hvmemul_linear_mmio_wr
>                                        pfec, hvmemul_ctxt, translate);
>  }
> 
> +static bool known_glfn(unsigned long addr, unsigned int bytes, uint32_t
> pfec)
> +{
> +    const struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
> +
> +    if ( pfec & PFEC_write_access )
> +    {
> +        if ( !vio->mmio_access.write_access )
> +            return false;
> +    }
> +    else if ( pfec & PFEC_insn_fetch )
> +    {
> +        if ( !vio->mmio_access.insn_fetch )
> +            return false;
> +    }
> +    else if ( !vio->mmio_access.read_access )
> +            return false;
> +
> +    return (vio->mmio_gla == (addr & PAGE_MASK) &&
> +            (addr & ~PAGE_MASK) + bytes <= PAGE_SIZE);
> +}
> +

Would it be possible to split the introduction of the above function into a separate patch? AFAICT it does not seem to be intrinsically involved with handling page straddling. It was certainly not there in your RFC patch.

  Paul

> +static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
> +                       uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
> +{
> +    pagefault_info_t pfinfo;
> +    int rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec,
> &pfinfo);
> +
> +    switch ( rc )
> +    {
> +        unsigned int offset, part1;
> +
> +    case HVMTRANS_okay:
> +        return X86EMUL_OKAY;
> +
> +    case HVMTRANS_bad_linear_to_gfn:
> +        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
> +        return X86EMUL_EXCEPTION;
> +
> +    case HVMTRANS_bad_gfn_to_mfn:
> +        if ( pfec & PFEC_insn_fetch )
> +            return X86EMUL_UNHANDLEABLE;
> +
> +        offset = addr & ~PAGE_MASK;
> +        if ( offset + bytes <= PAGE_SIZE )
> +            return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
> +                                            hvmemul_ctxt,
> +                                            known_glfn(addr, bytes, pfec));
> +
> +        /* Split the access at the page boundary. */
> +        part1 = PAGE_SIZE - offset;
> +        rc = linear_read(addr, part1, p_data, pfec, hvmemul_ctxt);
> +        if ( rc == X86EMUL_OKAY )
> +            rc = linear_read(addr + part1, bytes - part1, p_data + part1,
> +                             pfec, hvmemul_ctxt);
> +        return rc;
> +
> +    case HVMTRANS_gfn_paged_out:
> +    case HVMTRANS_gfn_shared:
> +        return X86EMUL_RETRY;
> +    }
> +
> +    return X86EMUL_UNHANDLEABLE;
> +}
> +
> +static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
> +                        uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
> +{
> +    pagefault_info_t pfinfo;
> +    int rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
> +
> +    switch ( rc )
> +    {
> +        unsigned int offset, part1;
> +
> +    case HVMTRANS_okay:
> +        return X86EMUL_OKAY;
> +
> +    case HVMTRANS_bad_linear_to_gfn:
> +        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
> +        return X86EMUL_EXCEPTION;
> +
> +    case HVMTRANS_bad_gfn_to_mfn:
> +        offset = addr & ~PAGE_MASK;
> +        if ( offset + bytes <= PAGE_SIZE )
> +            return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
> +                                             hvmemul_ctxt,
> +                                             known_glfn(addr, bytes, pfec));
> +
> +        /* Split the access at the page boundary. */
> +        part1 = PAGE_SIZE - offset;
> +        rc = linear_write(addr, part1, p_data, pfec, hvmemul_ctxt);
> +        if ( rc == X86EMUL_OKAY )
> +            rc = linear_write(addr + part1, bytes - part1, p_data + part1,
> +                              pfec, hvmemul_ctxt);
> +        return rc;
> +
> +    case HVMTRANS_gfn_paged_out:
> +    case HVMTRANS_gfn_shared:
> +        return X86EMUL_RETRY;
> +    }
> +
> +    return X86EMUL_UNHANDLEABLE;
> +}
> +
>  static int __hvmemul_read(
>      enum x86_segment seg,
>      unsigned long offset,
> @@ -1049,11 +1153,8 @@ static int __hvmemul_read(
>      enum hvm_access_type access_type,
>      struct hvm_emulate_ctxt *hvmemul_ctxt)
>  {
> -    struct vcpu *curr = current;
> -    pagefault_info_t pfinfo;
>      unsigned long addr, reps = 1;
>      uint32_t pfec = PFEC_page_present;
> -    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
>      int rc;
> 
>      if ( is_x86_system_segment(seg) )
> @@ -1067,34 +1168,8 @@ static int __hvmemul_read(
>          seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
>      if ( rc != X86EMUL_OKAY || !bytes )
>          return rc;
> -    if ( ((access_type != hvm_access_insn_fetch
> -           ? vio->mmio_access.read_access
> -           : vio->mmio_access.insn_fetch)) &&
> -         (vio->mmio_gla == (addr & PAGE_MASK)) )
> -        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
> hvmemul_ctxt, 1);
> -
> -    rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
> -
> -    switch ( rc )
> -    {
> -    case HVMTRANS_okay:
> -        break;
> -    case HVMTRANS_bad_linear_to_gfn:
> -        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
> -        return X86EMUL_EXCEPTION;
> -    case HVMTRANS_bad_gfn_to_mfn:
> -        if ( access_type == hvm_access_insn_fetch )
> -            return X86EMUL_UNHANDLEABLE;
> -
> -        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
> hvmemul_ctxt, 0);
> -    case HVMTRANS_gfn_paged_out:
> -    case HVMTRANS_gfn_shared:
> -        return X86EMUL_RETRY;
> -    default:
> -        return X86EMUL_UNHANDLEABLE;
> -    }
> 
> -    return X86EMUL_OKAY;
> +    return linear_read(addr, bytes, p_data, pfec, hvmemul_ctxt);
>  }
> 
>  static int hvmemul_read(
> @@ -1171,12 +1246,10 @@ static int hvmemul_write(
>  {
>      struct hvm_emulate_ctxt *hvmemul_ctxt =
>          container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
> -    struct vcpu *curr = current;
>      unsigned long addr, reps = 1;
>      uint32_t pfec = PFEC_page_present | PFEC_write_access;
> -    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
>      int rc;
> -    void *mapping;
> +    void *mapping = NULL;
> 
>      if ( is_x86_system_segment(seg) )
>          pfec |= PFEC_implicit;
> @@ -1188,16 +1261,15 @@ static int hvmemul_write(
>      if ( rc != X86EMUL_OKAY || !bytes )
>          return rc;
> 
> -    if ( vio->mmio_access.write_access &&
> -         (vio->mmio_gla == (addr & PAGE_MASK)) )
> -        return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
> hvmemul_ctxt, 1);
> -
> -    mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
> -    if ( IS_ERR(mapping) )
> -        return ~PTR_ERR(mapping);
> +    if ( !known_glfn(addr, bytes, pfec) )
> +    {
> +        mapping = hvmemul_map_linear_addr(addr, bytes, pfec,
> hvmemul_ctxt);
> +        if ( IS_ERR(mapping) )
> +             return ~PTR_ERR(mapping);
> +    }
> 
>      if ( !mapping )
> -        return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
> hvmemul_ctxt, 0);
> +        return linear_write(addr, bytes, p_data, pfec, hvmemul_ctxt);
> 
>      memcpy(mapping, p_data, bytes);
> 
> @@ -1218,7 +1290,6 @@ static int hvmemul_rmw(
>          container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
>      unsigned long addr, reps = 1;
>      uint32_t pfec = PFEC_page_present | PFEC_write_access;
> -    struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
>      int rc;
>      void *mapping;
> 
> @@ -1244,18 +1315,14 @@ static int hvmemul_rmw(
>      else
>      {
>          unsigned long data = 0;
> -        bool known_gpfn = vio->mmio_access.write_access &&
> -                          vio->mmio_gla == (addr & PAGE_MASK);
> 
>          if ( bytes > sizeof(data) )
>              return X86EMUL_UNHANDLEABLE;
> -        rc = hvmemul_linear_mmio_read(addr, bytes, &data, pfec,
> hvmemul_ctxt,
> -                                      known_gpfn);
> +        rc = linear_read(addr, bytes, &data, pfec, hvmemul_ctxt);
>          if ( rc == X86EMUL_OKAY )
>              rc = x86_emul_rmw(&data, bytes, eflags, state, ctxt);
>          if ( rc == X86EMUL_OKAY )
> -            rc = hvmemul_linear_mmio_write(addr, bytes, &data, pfec,
> -                                           hvmemul_ctxt, known_gpfn);
> +            rc = linear_write(addr, bytes, &data, pfec, hvmemul_ctxt);
>      }
> 
>      return rc;
> 
> 



* Re: [PATCH v2 2/2] x86/HVM: split page straddling emulated accesses in more cases
  2018-09-03 16:15     ` Paul Durrant
@ 2018-09-04  7:43       ` Jan Beulich
  2018-09-04  8:15         ` Paul Durrant
  0 siblings, 1 reply; 28+ messages in thread
From: Jan Beulich @ 2018-09-04  7:43 UTC (permalink / raw)
  To: Paul Durrant; +Cc: Andrew Cooper, Olaf Hering, xen-devel

>>> On 03.09.18 at 18:15, <Paul.Durrant@citrix.com> wrote:
>> From: Jan Beulich [mailto:JBeulich@suse.com]
>> Sent: 03 September 2018 16:45
>>[...]
>> The extra call to known_glfn() from hvmemul_write() is just to preserve
>> original behavior; we should consider dropping this (also to make sure
>> the intended effect of 8cbd4fb0b7 ["x86/hvm: implement hvmemul_write()
>> using real mappings"] is achieved in all cases where it can be achieved
>> without further rework), or alternatively we perhaps ought to mirror
>> this in hvmemul_rmw().

If you really want me to do the change below, could I have an
opinion on the above as well, as this may imply further re-work
of the patch?

>> --- a/xen/arch/x86/hvm/emulate.c
>> +++ b/xen/arch/x86/hvm/emulate.c
>> @@ -1041,6 +1041,110 @@ static inline int hvmemul_linear_mmio_wr
>>                                        pfec, hvmemul_ctxt, translate);
>>  }
>> 
>> +static bool known_glfn(unsigned long addr, unsigned int bytes, uint32_t
>> pfec)
>> +{
>> +    const struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
>> +
>> +    if ( pfec & PFEC_write_access )
>> +    {
>> +        if ( !vio->mmio_access.write_access )
>> +            return false;
>> +    }
>> +    else if ( pfec & PFEC_insn_fetch )
>> +    {
>> +        if ( !vio->mmio_access.insn_fetch )
>> +            return false;
>> +    }
>> +    else if ( !vio->mmio_access.read_access )
>> +            return false;
>> +
>> +    return (vio->mmio_gla == (addr & PAGE_MASK) &&
>> +            (addr & ~PAGE_MASK) + bytes <= PAGE_SIZE);
>> +}
>> +
> 
> Would it be possible to split the introduction of the above function into a 
> separate patch? AFAICT it does not seem to be intrinsically involved with 
> handling page straddling. It was certainly not there in your RFC patch.

The need for (or at least desirability of) it became obvious with the addition
of the logic to the write and rmw paths. It _could_ be split out, but it now is
a strictly necessary part/prereq of the change here.

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH v2 2/2] x86/HVM: split page straddling emulated accesses in more cases
  2018-09-04  7:43       ` Jan Beulich
@ 2018-09-04  8:15         ` Paul Durrant
  2018-09-04  8:46           ` Jan Beulich
  0 siblings, 1 reply; 28+ messages in thread
From: Paul Durrant @ 2018-09-04  8:15 UTC (permalink / raw)
  To: 'Jan Beulich'; +Cc: Andrew Cooper, Olaf Hering, xen-devel

> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 04 September 2018 08:44
> To: Paul Durrant <Paul.Durrant@citrix.com>
> Cc: Olaf Hering <olaf@aepfle.de>; Andrew Cooper
> <Andrew.Cooper3@citrix.com>; xen-devel <xen-
> devel@lists.xenproject.org>
> Subject: RE: [PATCH v2 2/2] x86/HVM: split page straddling emulated
> accesses in more cases
> 
> >>> On 03.09.18 at 18:15, <Paul.Durrant@citrix.com> wrote:
> >> From: Jan Beulich [mailto:JBeulich@suse.com]
> >> Sent: 03 September 2018 16:45
> >>[...]
> >> The extra call to known_glfn() from hvmemul_write() is just to preserve
> >> original behavior; we should consider dropping this (also to make sure
> >> the intended effect of 8cbd4fb0b7 ["x86/hvm: implement
> hvmemul_write()
> >> using real mappings"] is achieved in all cases where it can be achieved
> >> without further rework), or alternatively we perhaps ought to mirror
> >> this in hvmemul_rmw().
> 
> If you really want me to do the change below, could I have an
> opinion on the above as well, as this may imply further re-work
> of the patch?

It seems to me that the behaviour should be mirrored in hvmemul_rmw() to be correct.

> 
> >> --- a/xen/arch/x86/hvm/emulate.c
> >> +++ b/xen/arch/x86/hvm/emulate.c
> >> @@ -1041,6 +1041,110 @@ static inline int hvmemul_linear_mmio_wr
> >>                                        pfec, hvmemul_ctxt, translate);
> >>  }
> >>
> >> +static bool known_glfn(unsigned long addr, unsigned int bytes, uint32_t
> >> pfec)
> >> +{
> >> +    const struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
> >> +
> >> +    if ( pfec & PFEC_write_access )
> >> +    {
> >> +        if ( !vio->mmio_access.write_access )
> >> +            return false;
> >> +    }
> >> +    else if ( pfec & PFEC_insn_fetch )
> >> +    {
> >> +        if ( !vio->mmio_access.insn_fetch )
> >> +            return false;
> >> +    }
> >> +    else if ( !vio->mmio_access.read_access )
> >> +            return false;
> >> +
> >> +    return (vio->mmio_gla == (addr & PAGE_MASK) &&
> >> +            (addr & ~PAGE_MASK) + bytes <= PAGE_SIZE);
> >> +}
> >> +
> >
> > Would it be possible to split the introduction of the above function into a
> > separate patch? AFAICT it does not seem to be intrinsically involved with
> > handling page straddling. It was certainly not there in your RFC patch.
> 
> The need for (or at least desirability of) it became obvious with the addition
> of the logic to the write and rmw paths. It _could_ be split out, but it now is
> a strictly necessary part/prereq of the change here.

I was thinking it would be clearer to introduce known_glfn() in a patch prior to this one and then use it in the if statements just after the calls to hvmemul_virtual_to_linear() in read and write, possibly re-working rmw at that point also.

  Paul

> 
> Jan
> 


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH v2 2/2] x86/HVM: split page straddling emulated accesses in more cases
  2018-09-04  8:15         ` Paul Durrant
@ 2018-09-04  8:46           ` Jan Beulich
  0 siblings, 0 replies; 28+ messages in thread
From: Jan Beulich @ 2018-09-04  8:46 UTC (permalink / raw)
  To: Paul Durrant; +Cc: Andrew Cooper, Olaf Hering, xen-devel

>>> On 04.09.18 at 10:15, <Paul.Durrant@citrix.com> wrote:
>> From: Jan Beulich [mailto:JBeulich@suse.com]
>> Sent: 04 September 2018 08:44
>> 
>> >>> On 03.09.18 at 18:15, <Paul.Durrant@citrix.com> wrote:
>> >> From: Jan Beulich [mailto:JBeulich@suse.com]
>> >> Sent: 03 September 2018 16:45
>> >>[...]
>> >> The extra call to known_glfn() from hvmemul_write() is just to preserve
>> >> original behavior; we should consider dropping this (also to make sure
>> >> the intended effect of 8cbd4fb0b7 ["x86/hvm: implement
>> hvmemul_write()
>> >> using real mappings"] is achieved in all cases where it can be achieved
>> >> without further rework), or alternatively we perhaps ought to mirror
>> >> this in hvmemul_rmw().
>> 
>> If you really want me to do the change below, could I have an
>> opinion on the above as well, as this may imply further re-work
>> of the patch?
> 
> It seems to me that the behaviour should be mirrored in hvmemul_rmw() to be 
> correct.

Interesting. As said in the patch description, the presence of the extra
conditional looks to be preventing what 8cbd4fb0b7 wanted to achieve.
The present (prior to this patch) short-circuiting to call
hvmemul_linear_mmio_write() when the GFN is known is one of the
things getting in the way of split accesses. When the first part of the
access is the one whose GFN we know, but the second part is in RAM,
things won't work. Furthermore, I think this is also an issue for non-split
accesses - the second call to handle_mmio_with_translation() from
hvm_hap_nested_page_fault() is not limited to MMIO ranges.

So I think the question is the other way around: Is there anything that
would break if the conditional was removed (making it match the rmw
case)?
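
For illustration, a minimal standalone C sketch (not Xen code; hypothetical
addresses, assuming 4KiB pages) of such a straddling write, where the first
bytes land on the page whose translation is tracked as MMIO while the rest
lands on ordinary RAM:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long addr = 0x10ffdUL; /* hypothetical guest-linear address */
    unsigned int bytes = 8;         /* write runs into the next page */

    unsigned int offset = addr & ~PAGE_MASK; /* 0xffd */
    unsigned int part1 = PAGE_SIZE - offset; /* 3 bytes left on this page */

    printf("part 1: %#lx, %u bytes on page %#lx (MMIO-tracked)\n",
           addr, part1, addr & PAGE_MASK);
    printf("part 2: %#lx, %u bytes on page %#lx (plain RAM)\n",
           addr + part1, bytes - part1, (addr + part1) & PAGE_MASK);

    /*
     * Short-circuiting the whole 8-byte write into the MMIO path just
     * because the first page's translation is known would also send the
     * 5 RAM bytes to the MMIO handler - which is what splitting avoids.
     */
    return 0;
}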

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH v2 1/2] x86/HVM: drop hvm_fetch_from_guest_linear()
  2018-09-03 15:43   ` [PATCH v2 1/2] x86/HVM: drop hvm_fetch_from_guest_linear() Jan Beulich
@ 2018-09-04 10:01     ` Jan Beulich
  2018-09-05  7:20     ` Olaf Hering
  1 sibling, 0 replies; 28+ messages in thread
From: Jan Beulich @ 2018-09-04 10:01 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Olaf Hering, Paul Durrant, Tim Deegan

>>> On 03.09.18 at 17:43, <JBeulich@suse.com> wrote:
> It can easily be expressed through hvm_copy_from_guest_linear(), and in
> two cases this even simplifies callers.
> 
> Suggested-by: Paul Durrant <paul.durrant@citrix.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
> ---
> v2: Make sure this compiles standalone. Slightly adjust change in
>     hvm_ud_intercept().

Should have Cc-ed Tim as well.

Jan

> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -1060,6 +1060,8 @@ static int __hvmemul_read(
>          pfec |= PFEC_implicit;
>      else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
>          pfec |= PFEC_user_mode;
> +    if ( access_type == hvm_access_insn_fetch )
> +        pfec |= PFEC_insn_fetch;
>  
>      rc = hvmemul_virtual_to_linear(
>          seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
> @@ -1071,9 +1073,7 @@ static int __hvmemul_read(
>           (vio->mmio_gla == (addr & PAGE_MASK)) )
>          return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, 
> hvmemul_ctxt, 1);
>  
> -    rc = ((access_type == hvm_access_insn_fetch) ?
> -          hvm_fetch_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo) :
> -          hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo));
> +    rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
>  
>      switch ( rc )
>      {
> @@ -2512,9 +2512,10 @@ void hvm_emulate_init_per_insn(
>                                          hvm_access_insn_fetch,
>                                          &hvmemul_ctxt->seg_reg[x86_seg_cs],
>                                          &addr) &&
> -             hvm_fetch_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
> -                                         sizeof(hvmemul_ctxt->insn_buf),
> -                                         pfec, NULL) == HVMTRANS_okay) ?
> +             hvm_copy_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
> +                                        sizeof(hvmemul_ctxt->insn_buf),
> +                                        pfec | PFEC_insn_fetch,
> +                                        NULL) == HVMTRANS_okay) ?
>              sizeof(hvmemul_ctxt->insn_buf) : 0;
>      }
>      else
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -3287,15 +3287,6 @@ enum hvm_translation_result hvm_copy_fro
>                        PFEC_page_present | pfec, pfinfo);
>  }
>  
> -enum hvm_translation_result hvm_fetch_from_guest_linear(
> -    void *buf, unsigned long addr, int size, uint32_t pfec,
> -    pagefault_info_t *pfinfo)
> -{
> -    return __hvm_copy(buf, addr, size, current,
> -                      HVMCOPY_from_guest | HVMCOPY_linear,
> -                      PFEC_page_present | PFEC_insn_fetch | pfec, pfinfo);
> -}
> -
>  unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int 
> len)
>  {
>      int rc;
> @@ -3741,16 +3732,16 @@ void hvm_ud_intercept(struct cpu_user_re
>      if ( opt_hvm_fep )
>      {
>          const struct segment_register *cs = &ctxt.seg_reg[x86_seg_cs];
> -        uint32_t walk = (ctxt.seg_reg[x86_seg_ss].dpl == 3)
> -            ? PFEC_user_mode : 0;
> +        uint32_t walk = ((ctxt.seg_reg[x86_seg_ss].dpl == 3)
> +                         ? PFEC_user_mode : 0) | PFEC_insn_fetch;
>          unsigned long addr;
>          char sig[5]; /* ud2; .ascii "xen" */
>  
>          if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
>                                          sizeof(sig), hvm_access_insn_fetch,
>                                          cs, &addr) &&
> -             (hvm_fetch_from_guest_linear(sig, addr, sizeof(sig),
> -                                          walk, NULL) == HVMTRANS_okay) &&
> +             (hvm_copy_from_guest_linear(sig, addr, sizeof(sig),
> +                                         walk, NULL) == HVMTRANS_okay) &&
>               (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
>          {
>              regs->rip += sizeof(sig);
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -164,8 +164,9 @@ const struct x86_emulate_ops *shadow_ini
>          (!hvm_translate_virtual_addr(
>              x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
>              hvm_access_insn_fetch, sh_ctxt, &addr) &&
> -         !hvm_fetch_from_guest_linear(
> -             sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
> +         !hvm_copy_from_guest_linear(
> +             sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf),
> +             PFEC_insn_fetch, NULL))
>          ? sizeof(sh_ctxt->insn_buf) : 0;
>  
>      return &hvm_shadow_emulator_ops;
> @@ -198,8 +199,9 @@ void shadow_continue_emulation(struct sh
>              (!hvm_translate_virtual_addr(
>                  x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
>                  hvm_access_insn_fetch, sh_ctxt, &addr) &&
> -             !hvm_fetch_from_guest_linear(
> -                 sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, 
> NULL))
> +             !hvm_copy_from_guest_linear(
> +                 sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf),
> +                 PFEC_insn_fetch, NULL))
>              ? sizeof(sh_ctxt->insn_buf) : 0;
>          sh_ctxt->insn_buf_eip = regs->rip;
>      }
> --- a/xen/arch/x86/mm/shadow/hvm.c
> +++ b/xen/arch/x86/mm/shadow/hvm.c
> @@ -122,10 +122,10 @@ hvm_read(enum x86_segment seg,
>      if ( rc || !bytes )
>          return rc;
>  
> -    if ( access_type == hvm_access_insn_fetch )
> -        rc = hvm_fetch_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
> -    else
> -        rc = hvm_copy_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
> +    rc = hvm_copy_from_guest_linear(p_data, addr, bytes,
> +                                    (access_type == hvm_access_insn_fetch
> +                                     ? PFEC_insn_fetch : 0),
> +                                    &pfinfo);
>  
>      switch ( rc )
>      {
> --- a/xen/include/asm-x86/hvm/support.h
> +++ b/xen/include/asm-x86/hvm/support.h
> @@ -100,9 +100,6 @@ enum hvm_translation_result hvm_copy_to_
>  enum hvm_translation_result hvm_copy_from_guest_linear(
>      void *buf, unsigned long addr, int size, uint32_t pfec,
>      pagefault_info_t *pfinfo);
> -enum hvm_translation_result hvm_fetch_from_guest_linear(
> -    void *buf, unsigned long addr, int size, uint32_t pfec,
> -    pagefault_info_t *pfinfo);
>  
>  /*
>   * Get a reference on the page under an HVM physical or linear address.  If
> 
> 
> 
> 
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xenproject.org 
> https://lists.xenproject.org/mailman/listinfo/xen-devel 




_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH v2 1/2] x86/HVM: drop hvm_fetch_from_guest_linear()
  2018-09-03 15:43   ` [PATCH v2 1/2] x86/HVM: drop hvm_fetch_from_guest_linear() Jan Beulich
  2018-09-04 10:01     ` Jan Beulich
@ 2018-09-05  7:20     ` Olaf Hering
  1 sibling, 0 replies; 28+ messages in thread
From: Olaf Hering @ 2018-09-05  7:20 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Paul Durrant, Andrew Cooper


[-- Attachment #1.1: Type: text/plain, Size: 419 bytes --]

On Mon, 03 Sep 2018 09:43:45 -0600,
"Jan Beulich" <JBeulich@suse.com> wrote:

> It can easily be expressed through hvm_copy_from_guest_linear(), and in
> two cases this even simplifies callers.
> 
> Suggested-by: Paul Durrant <paul.durrant@citrix.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

Tested-by: Olaf Hering <olaf@aepfle.de>

Olaf

[-- Attachment #1.2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 195 bytes --]

[-- Attachment #2: Type: text/plain, Size: 157 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH v2 2/2] x86/HVM: split page straddling emulated accesses in more cases
  2018-09-03 15:44   ` [PATCH v2 2/2] x86/HVM: split page straddling emulated accesses in more cases Jan Beulich
  2018-09-03 16:15     ` Paul Durrant
@ 2018-09-05  7:22     ` Olaf Hering
  1 sibling, 0 replies; 28+ messages in thread
From: Olaf Hering @ 2018-09-05  7:22 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel, Paul Durrant, Andrew Cooper


[-- Attachment #1.1: Type: text/plain, Size: 545 bytes --]

On Mon, 03 Sep 2018 09:44:38 -0600,
"Jan Beulich" <JBeulich@suse.com> wrote:

> Assuming consecutive linear addresses map to all RAM or all MMIO is not
> correct. Nor is assuming that a page straddling MMIO access will access
> the same emulating component for both parts of the access. If a guest
> RAM read fails with HVMTRANS_bad_gfn_to_mfn and if the access straddles
> a page boundary, issue accesses separately for both parts.

> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Tested-by: Olaf Hering <olaf@aepfle.de>

Olaf

[-- Attachment #1.2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 195 bytes --]

[-- Attachment #2: Type: text/plain, Size: 157 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [PATCH v3 0/3] x86/HVM: emulation adjustments
  2018-08-30 11:05 [PATCH 0/2] x86/HVM: emulation adjustments Jan Beulich
                   ` (2 preceding siblings ...)
  2018-09-03 15:41 ` [PATCH v2 0/2] x86/HVM: emulation adjustments Jan Beulich
@ 2018-09-06 12:58 ` Jan Beulich
  2018-09-06 13:03   ` [PATCH v3 1/3] x86/HVM: drop hvm_fetch_from_guest_linear() Jan Beulich
                     ` (2 more replies)
  3 siblings, 3 replies; 28+ messages in thread
From: Jan Beulich @ 2018-09-06 12:58 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Olaf Hering, Paul Durrant

1: drop hvm_fetch_from_guest_linear()
2: add known_gla() emulation helper
3: split page straddling emulated accesses in more cases

v3: Split patch 2 out from patch 3.
v2: Patch 1 now builds cleanly (without other patches in place the up-to-
date versions of which are yet to be posted), and patch 2 is no longer RFC.

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [PATCH v3 1/3] x86/HVM: drop hvm_fetch_from_guest_linear()
  2018-09-06 12:58 ` [PATCH v3 0/3] x86/HVM: emulation adjustments Jan Beulich
@ 2018-09-06 13:03   ` Jan Beulich
  2018-09-06 14:51     ` Paul Durrant
  2018-09-06 13:03   ` [PATCH v3 2/3] x86/HVM: add known_gla() emulation helper Jan Beulich
  2018-09-06 13:04   ` [PATCH v3 3/3] x86/HVM: split page straddling emulated accesses in more cases Jan Beulich
  2 siblings, 1 reply; 28+ messages in thread
From: Jan Beulich @ 2018-09-06 13:03 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Olaf Hering, Paul Durrant, Tim Deegan

It can easily be expressed through hvm_copy_from_guest_linear(), and in
two cases this even simplifies callers.

Suggested-by: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Tested-by: Olaf Hering <olaf@aepfle.de>
---
v2: Make sure this compiles standalone. Slightly adjust change in
    hvm_ud_intercept().
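
At the call sites the conversion is mechanical; roughly (sketch only, with
placeholder argument names):

/* Before: the helper OR'ed PFEC_insn_fetch in internally. */
rc = hvm_fetch_from_guest_linear(buf, addr, size, pfec, pfinfo);

/* After: the caller supplies PFEC_insn_fetch explicitly. */
rc = hvm_copy_from_guest_linear(buf, addr, size, pfec | PFEC_insn_fetch, pfinfo);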

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1060,6 +1060,8 @@ static int __hvmemul_read(
         pfec |= PFEC_implicit;
     else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
         pfec |= PFEC_user_mode;
+    if ( access_type == hvm_access_insn_fetch )
+        pfec |= PFEC_insn_fetch;
 
     rc = hvmemul_virtual_to_linear(
         seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
@@ -1071,9 +1073,7 @@ static int __hvmemul_read(
          (vio->mmio_gla == (addr & PAGE_MASK)) )
         return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
 
-    rc = ((access_type == hvm_access_insn_fetch) ?
-          hvm_fetch_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo) :
-          hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo));
+    rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
 
     switch ( rc )
     {
@@ -2512,9 +2512,10 @@ void hvm_emulate_init_per_insn(
                                         hvm_access_insn_fetch,
                                         &hvmemul_ctxt->seg_reg[x86_seg_cs],
                                         &addr) &&
-             hvm_fetch_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
-                                         sizeof(hvmemul_ctxt->insn_buf),
-                                         pfec, NULL) == HVMTRANS_okay) ?
+             hvm_copy_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
+                                        sizeof(hvmemul_ctxt->insn_buf),
+                                        pfec | PFEC_insn_fetch,
+                                        NULL) == HVMTRANS_okay) ?
             sizeof(hvmemul_ctxt->insn_buf) : 0;
     }
     else
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3286,15 +3286,6 @@ enum hvm_translation_result hvm_copy_fro
                       PFEC_page_present | pfec, pfinfo);
 }
 
-enum hvm_translation_result hvm_fetch_from_guest_linear(
-    void *buf, unsigned long addr, int size, uint32_t pfec,
-    pagefault_info_t *pfinfo)
-{
-    return __hvm_copy(buf, addr, size, current,
-                      HVMCOPY_from_guest | HVMCOPY_linear,
-                      PFEC_page_present | PFEC_insn_fetch | pfec, pfinfo);
-}
-
 unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
 {
     int rc;
@@ -3740,16 +3731,16 @@ void hvm_ud_intercept(struct cpu_user_re
     if ( opt_hvm_fep )
     {
         const struct segment_register *cs = &ctxt.seg_reg[x86_seg_cs];
-        uint32_t walk = (ctxt.seg_reg[x86_seg_ss].dpl == 3)
-            ? PFEC_user_mode : 0;
+        uint32_t walk = ((ctxt.seg_reg[x86_seg_ss].dpl == 3)
+                         ? PFEC_user_mode : 0) | PFEC_insn_fetch;
         unsigned long addr;
         char sig[5]; /* ud2; .ascii "xen" */
 
         if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
                                         sizeof(sig), hvm_access_insn_fetch,
                                         cs, &addr) &&
-             (hvm_fetch_from_guest_linear(sig, addr, sizeof(sig),
-                                          walk, NULL) == HVMTRANS_okay) &&
+             (hvm_copy_from_guest_linear(sig, addr, sizeof(sig),
+                                         walk, NULL) == HVMTRANS_okay) &&
              (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
         {
             regs->rip += sizeof(sig);
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -164,8 +164,9 @@ const struct x86_emulate_ops *shadow_ini
         (!hvm_translate_virtual_addr(
             x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
             hvm_access_insn_fetch, sh_ctxt, &addr) &&
-         !hvm_fetch_from_guest_linear(
-             sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
+         !hvm_copy_from_guest_linear(
+             sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf),
+             PFEC_insn_fetch, NULL))
         ? sizeof(sh_ctxt->insn_buf) : 0;
 
     return &hvm_shadow_emulator_ops;
@@ -198,8 +199,9 @@ void shadow_continue_emulation(struct sh
             (!hvm_translate_virtual_addr(
                 x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
                 hvm_access_insn_fetch, sh_ctxt, &addr) &&
-             !hvm_fetch_from_guest_linear(
-                 sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
+             !hvm_copy_from_guest_linear(
+                 sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf),
+                 PFEC_insn_fetch, NULL))
             ? sizeof(sh_ctxt->insn_buf) : 0;
         sh_ctxt->insn_buf_eip = regs->rip;
     }
--- a/xen/arch/x86/mm/shadow/hvm.c
+++ b/xen/arch/x86/mm/shadow/hvm.c
@@ -122,10 +122,10 @@ hvm_read(enum x86_segment seg,
     if ( rc || !bytes )
         return rc;
 
-    if ( access_type == hvm_access_insn_fetch )
-        rc = hvm_fetch_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
-    else
-        rc = hvm_copy_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
+    rc = hvm_copy_from_guest_linear(p_data, addr, bytes,
+                                    (access_type == hvm_access_insn_fetch
+                                     ? PFEC_insn_fetch : 0),
+                                    &pfinfo);
 
     switch ( rc )
     {
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -100,9 +100,6 @@ enum hvm_translation_result hvm_copy_to_
 enum hvm_translation_result hvm_copy_from_guest_linear(
     void *buf, unsigned long addr, int size, uint32_t pfec,
     pagefault_info_t *pfinfo);
-enum hvm_translation_result hvm_fetch_from_guest_linear(
-    void *buf, unsigned long addr, int size, uint32_t pfec,
-    pagefault_info_t *pfinfo);
 
 /*
  * Get a reference on the page under an HVM physical or linear address.  If




_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [PATCH v3 2/3] x86/HVM: add known_gla() emulation helper
  2018-09-06 12:58 ` [PATCH v3 0/3] x86/HVM: emulation adjustments Jan Beulich
  2018-09-06 13:03   ` [PATCH v3 1/3] x86/HVM: drop hvm_fetch_from_guest_linear() Jan Beulich
@ 2018-09-06 13:03   ` Jan Beulich
  2018-09-06 13:12     ` Paul Durrant
  2018-09-06 13:04   ` [PATCH v3 3/3] x86/HVM: split page straddling emulated accesses in more cases Jan Beulich
  2 siblings, 1 reply; 28+ messages in thread
From: Jan Beulich @ 2018-09-06 13:03 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Olaf Hering, Paul Durrant

... as a central place to check whether the translation for the linear
address is available as well as usable.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v3: Split from subsequent patch.
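
At its call sites the helper folds the open-coded mmio_access/mmio_gla
checks into a single predicate; for the read path this amounts to (sketch
mirroring the hunks below):

/* Before: */
if ( ((access_type != hvm_access_insn_fetch
       ? vio->mmio_access.read_access
       : vio->mmio_access.insn_fetch)) &&
     (vio->mmio_gla == (addr & PAGE_MASK)) )
    /* ... short-circuit to the MMIO path ... */;

/* After: the kind of access is derived from pfec inside the helper. */
if ( known_gla(addr, bytes, pfec) )
    /* ... short-circuit to the MMIO path ... */;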

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1041,6 +1041,26 @@ static inline int hvmemul_linear_mmio_wr
                                       pfec, hvmemul_ctxt, translate);
 }
 
+static bool known_gla(unsigned long addr, unsigned int bytes, uint32_t pfec)
+{
+    const struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
+
+    if ( pfec & PFEC_write_access )
+    {
+        if ( !vio->mmio_access.write_access )
+            return false;
+    }
+    else if ( pfec & PFEC_insn_fetch )
+    {
+        if ( !vio->mmio_access.insn_fetch )
+            return false;
+    }
+    else if ( !vio->mmio_access.read_access )
+            return false;
+
+    return vio->mmio_gla == (addr & PAGE_MASK);
+}
+
 static int __hvmemul_read(
     enum x86_segment seg,
     unsigned long offset,
@@ -1049,11 +1069,9 @@ static int __hvmemul_read(
     enum hvm_access_type access_type,
     struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
-    struct vcpu *curr = current;
     pagefault_info_t pfinfo;
     unsigned long addr, reps = 1;
     uint32_t pfec = PFEC_page_present;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
     int rc;
 
     if ( is_x86_system_segment(seg) )
@@ -1067,10 +1085,7 @@ static int __hvmemul_read(
         seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
     if ( rc != X86EMUL_OKAY || !bytes )
         return rc;
-    if ( ((access_type != hvm_access_insn_fetch
-           ? vio->mmio_access.read_access
-           : vio->mmio_access.insn_fetch)) &&
-         (vio->mmio_gla == (addr & PAGE_MASK)) )
+    if ( known_gla(addr, bytes, pfec) )
         return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
 
     rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
@@ -1171,10 +1186,8 @@ static int hvmemul_write(
 {
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
-    struct vcpu *curr = current;
     unsigned long addr, reps = 1;
     uint32_t pfec = PFEC_page_present | PFEC_write_access;
-    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
     int rc;
     void *mapping;
 
@@ -1188,8 +1201,7 @@ static int hvmemul_write(
     if ( rc != X86EMUL_OKAY || !bytes )
         return rc;
 
-    if ( vio->mmio_access.write_access &&
-         (vio->mmio_gla == (addr & PAGE_MASK)) )
+    if ( known_gla(addr, bytes, pfec) )
         return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
 
     mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
@@ -1218,7 +1230,6 @@ static int hvmemul_rmw(
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
     unsigned long addr, reps = 1;
     uint32_t pfec = PFEC_page_present | PFEC_write_access;
-    struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
     int rc;
     void *mapping;
 
@@ -1244,8 +1255,7 @@ static int hvmemul_rmw(
     else
     {
         unsigned long data = 0;
-        bool known_gpfn = vio->mmio_access.write_access &&
-                          vio->mmio_gla == (addr & PAGE_MASK);
+        bool known_gpfn = known_gla(addr, bytes, pfec);
 
         if ( bytes > sizeof(data) )
             return X86EMUL_UNHANDLEABLE;





_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [PATCH v3 3/3] x86/HVM: split page straddling emulated accesses in more cases
  2018-09-06 12:58 ` [PATCH v3 0/3] x86/HVM: emulation adjustments Jan Beulich
  2018-09-06 13:03   ` [PATCH v3 1/3] x86/HVM: drop hvm_fetch_from_guest_linear() Jan Beulich
  2018-09-06 13:03   ` [PATCH v3 2/3] x86/HVM: add known_gla() emulation helper Jan Beulich
@ 2018-09-06 13:04   ` Jan Beulich
  2018-09-06 13:14     ` Paul Durrant
  2 siblings, 1 reply; 28+ messages in thread
From: Jan Beulich @ 2018-09-06 13:04 UTC (permalink / raw)
  To: xen-devel; +Cc: Andrew Cooper, Olaf Hering, Paul Durrant

Assuming consecutive linear addresses map to all RAM or all MMIO is not
correct. Nor is assuming that a page straddling MMIO access will access
the same emulating component for both parts of the access. If a guest
RAM read fails with HVMTRANS_bad_gfn_to_mfn and if the access straddles
a page boundary, issue accesses separately for both parts.

The extra call to known_gla() from hvmemul_write() is just to preserve
original behavior; for consistency the check also gets added to
hvmemul_rmw() (albeit I continue to be unsure whether we wouldn't better
drop both).

Note that the correctness of this depends on the MMIO caching used
elsewhere in the emulation code.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Tested-by: Olaf Hering <olaf@aepfle.de>
---
v3: Move introduction of known_gla() to a prereq patch. Mirror check
    using the function into hvmemul_rmw().
v2: Also handle hvmemul_{write,rmw}().
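
A worked example of the new handling (hypothetical layout, 4KiB pages):
a 4-byte read at linear address 0x10ffe, with the first page being RAM and
the second page MMIO, first tries hvm_copy_from_guest_linear() on the whole
range and gets back HVMTRANS_bad_gfn_to_mfn. Since offset (0xffe) plus
bytes (4) exceeds PAGE_SIZE, linear_read() splits at the page boundary:
part1 = PAGE_SIZE - 0xffe = 2 bytes are re-issued and satisfied from RAM,
while the remaining 2 bytes at 0x11000 recurse once more, now fit within a
single page, and go through hvmemul_linear_mmio_read().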

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1058,7 +1058,91 @@ static bool known_gla(unsigned long addr
     else if ( !vio->mmio_access.read_access )
             return false;
 
-    return vio->mmio_gla == (addr & PAGE_MASK);
+    return (vio->mmio_gla == (addr & PAGE_MASK) &&
+            (addr & ~PAGE_MASK) + bytes <= PAGE_SIZE);
+}
+
+static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
+                       uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    pagefault_info_t pfinfo;
+    int rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
+
+    switch ( rc )
+    {
+        unsigned int offset, part1;
+
+    case HVMTRANS_okay:
+        return X86EMUL_OKAY;
+
+    case HVMTRANS_bad_linear_to_gfn:
+        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
+        return X86EMUL_EXCEPTION;
+
+    case HVMTRANS_bad_gfn_to_mfn:
+        if ( pfec & PFEC_insn_fetch )
+            return X86EMUL_UNHANDLEABLE;
+
+        offset = addr & ~PAGE_MASK;
+        if ( offset + bytes <= PAGE_SIZE )
+            return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
+                                            hvmemul_ctxt,
+                                            known_gla(addr, bytes, pfec));
+
+        /* Split the access at the page boundary. */
+        part1 = PAGE_SIZE - offset;
+        rc = linear_read(addr, part1, p_data, pfec, hvmemul_ctxt);
+        if ( rc == X86EMUL_OKAY )
+            rc = linear_read(addr + part1, bytes - part1, p_data + part1,
+                             pfec, hvmemul_ctxt);
+        return rc;
+
+    case HVMTRANS_gfn_paged_out:
+    case HVMTRANS_gfn_shared:
+        return X86EMUL_RETRY;
+    }
+
+    return X86EMUL_UNHANDLEABLE;
+}
+
+static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
+                        uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    pagefault_info_t pfinfo;
+    int rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
+
+    switch ( rc )
+    {
+        unsigned int offset, part1;
+
+    case HVMTRANS_okay:
+        return X86EMUL_OKAY;
+
+    case HVMTRANS_bad_linear_to_gfn:
+        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
+        return X86EMUL_EXCEPTION;
+
+    case HVMTRANS_bad_gfn_to_mfn:
+        offset = addr & ~PAGE_MASK;
+        if ( offset + bytes <= PAGE_SIZE )
+            return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
+                                             hvmemul_ctxt,
+                                             known_gla(addr, bytes, pfec));
+
+        /* Split the access at the page boundary. */
+        part1 = PAGE_SIZE - offset;
+        rc = linear_write(addr, part1, p_data, pfec, hvmemul_ctxt);
+        if ( rc == X86EMUL_OKAY )
+            rc = linear_write(addr + part1, bytes - part1, p_data + part1,
+                              pfec, hvmemul_ctxt);
+        return rc;
+
+    case HVMTRANS_gfn_paged_out:
+    case HVMTRANS_gfn_shared:
+        return X86EMUL_RETRY;
+    }
+
+    return X86EMUL_UNHANDLEABLE;
 }
 
 static int __hvmemul_read(
@@ -1069,7 +1153,6 @@ static int __hvmemul_read(
     enum hvm_access_type access_type,
     struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
-    pagefault_info_t pfinfo;
     unsigned long addr, reps = 1;
     uint32_t pfec = PFEC_page_present;
     int rc;
@@ -1085,31 +1168,8 @@ static int __hvmemul_read(
         seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
     if ( rc != X86EMUL_OKAY || !bytes )
         return rc;
-    if ( known_gla(addr, bytes, pfec) )
-        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
 
-    rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
-
-    switch ( rc )
-    {
-    case HVMTRANS_okay:
-        break;
-    case HVMTRANS_bad_linear_to_gfn:
-        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
-        return X86EMUL_EXCEPTION;
-    case HVMTRANS_bad_gfn_to_mfn:
-        if ( access_type == hvm_access_insn_fetch )
-            return X86EMUL_UNHANDLEABLE;
-
-        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
-    case HVMTRANS_gfn_paged_out:
-    case HVMTRANS_gfn_shared:
-        return X86EMUL_RETRY;
-    default:
-        return X86EMUL_UNHANDLEABLE;
-    }
-
-    return X86EMUL_OKAY;
+    return linear_read(addr, bytes, p_data, pfec, hvmemul_ctxt);
 }
 
 static int hvmemul_read(
@@ -1189,7 +1249,7 @@ static int hvmemul_write(
     unsigned long addr, reps = 1;
     uint32_t pfec = PFEC_page_present | PFEC_write_access;
     int rc;
-    void *mapping;
+    void *mapping = NULL;
 
     if ( is_x86_system_segment(seg) )
         pfec |= PFEC_implicit;
@@ -1201,15 +1261,15 @@ static int hvmemul_write(
     if ( rc != X86EMUL_OKAY || !bytes )
         return rc;
 
-    if ( known_gla(addr, bytes, pfec) )
-        return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
-
-    mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
-    if ( IS_ERR(mapping) )
-        return ~PTR_ERR(mapping);
+    if ( !known_gla(addr, bytes, pfec) )
+    {
+        mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
+        if ( IS_ERR(mapping) )
+             return ~PTR_ERR(mapping);
+    }
 
     if ( !mapping )
-        return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
+        return linear_write(addr, bytes, p_data, pfec, hvmemul_ctxt);
 
     memcpy(mapping, p_data, bytes);
 
@@ -1231,7 +1291,7 @@ static int hvmemul_rmw(
     unsigned long addr, reps = 1;
     uint32_t pfec = PFEC_page_present | PFEC_write_access;
     int rc;
-    void *mapping;
+    void *mapping = NULL;
 
     rc = hvmemul_virtual_to_linear(
         seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
@@ -1243,9 +1303,12 @@ static int hvmemul_rmw(
     else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
         pfec |= PFEC_user_mode;
 
-    mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
-    if ( IS_ERR(mapping) )
-        return ~PTR_ERR(mapping);
+    if ( !known_gla(addr, bytes, pfec) )
+    {
+        mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
+        if ( IS_ERR(mapping) )
+            return ~PTR_ERR(mapping);
+    }
 
     if ( mapping )
     {
@@ -1255,17 +1318,14 @@ static int hvmemul_rmw(
     else
     {
         unsigned long data = 0;
-        bool known_gpfn = known_gla(addr, bytes, pfec);
 
         if ( bytes > sizeof(data) )
             return X86EMUL_UNHANDLEABLE;
-        rc = hvmemul_linear_mmio_read(addr, bytes, &data, pfec, hvmemul_ctxt,
-                                      known_gpfn);
+        rc = linear_read(addr, bytes, &data, pfec, hvmemul_ctxt);
         if ( rc == X86EMUL_OKAY )
             rc = x86_emul_rmw(&data, bytes, eflags, state, ctxt);
         if ( rc == X86EMUL_OKAY )
-            rc = hvmemul_linear_mmio_write(addr, bytes, &data, pfec,
-                                           hvmemul_ctxt, known_gpfn);
+            rc = linear_write(addr, bytes, &data, pfec, hvmemul_ctxt);
     }
 
     return rc;




_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH v3 2/3] x86/HVM: add known_gla() emulation helper
  2018-09-06 13:03   ` [PATCH v3 2/3] x86/HVM: add known_gla() emulation helper Jan Beulich
@ 2018-09-06 13:12     ` Paul Durrant
  2018-09-06 13:22       ` Jan Beulich
  0 siblings, 1 reply; 28+ messages in thread
From: Paul Durrant @ 2018-09-06 13:12 UTC (permalink / raw)
  To: 'Jan Beulich', xen-devel; +Cc: Andrew Cooper, Olaf Hering

> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 06 September 2018 14:04
> To: xen-devel <xen-devel@lists.xenproject.org>
> Cc: Olaf Hering <olaf@aepfle.de>; Andrew Cooper
> <Andrew.Cooper3@citrix.com>; Paul Durrant <Paul.Durrant@citrix.com>
> Subject: [PATCH v3 2/3] x86/HVM: add known_gla() emulation helper
> 
> ... as a central place to check whether the translation for the linear
> address is available as well as usable.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> v3: Split from subsequent patch.

Much nicer :-)

Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

> 
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -1041,6 +1041,26 @@ static inline int hvmemul_linear_mmio_wr
>                                        pfec, hvmemul_ctxt, translate);
>  }
> 
> +static bool known_gla(unsigned long addr, unsigned int bytes, uint32_t
> pfec)
> +{
> +    const struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
> +
> +    if ( pfec & PFEC_write_access )
> +    {
> +        if ( !vio->mmio_access.write_access )
> +            return false;
> +    }
> +    else if ( pfec & PFEC_insn_fetch )
> +    {
> +        if ( !vio->mmio_access.insn_fetch )
> +            return false;
> +    }
> +    else if ( !vio->mmio_access.read_access )
> +            return false;
> +
> +    return vio->mmio_gla == (addr & PAGE_MASK);
> +}
> +
>  static int __hvmemul_read(
>      enum x86_segment seg,
>      unsigned long offset,
> @@ -1049,11 +1069,9 @@ static int __hvmemul_read(
>      enum hvm_access_type access_type,
>      struct hvm_emulate_ctxt *hvmemul_ctxt)
>  {
> -    struct vcpu *curr = current;
>      pagefault_info_t pfinfo;
>      unsigned long addr, reps = 1;
>      uint32_t pfec = PFEC_page_present;
> -    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
>      int rc;
> 
>      if ( is_x86_system_segment(seg) )
> @@ -1067,10 +1085,7 @@ static int __hvmemul_read(
>          seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
>      if ( rc != X86EMUL_OKAY || !bytes )
>          return rc;
> -    if ( ((access_type != hvm_access_insn_fetch
> -           ? vio->mmio_access.read_access
> -           : vio->mmio_access.insn_fetch)) &&
> -         (vio->mmio_gla == (addr & PAGE_MASK)) )
> +    if ( known_gla(addr, bytes, pfec) )
>          return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
> hvmemul_ctxt, 1);
> 
>      rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
> @@ -1171,10 +1186,8 @@ static int hvmemul_write(
>  {
>      struct hvm_emulate_ctxt *hvmemul_ctxt =
>          container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
> -    struct vcpu *curr = current;
>      unsigned long addr, reps = 1;
>      uint32_t pfec = PFEC_page_present | PFEC_write_access;
> -    struct hvm_vcpu_io *vio = &curr->arch.hvm.hvm_io;
>      int rc;
>      void *mapping;
> 
> @@ -1188,8 +1201,7 @@ static int hvmemul_write(
>      if ( rc != X86EMUL_OKAY || !bytes )
>          return rc;
> 
> -    if ( vio->mmio_access.write_access &&
> -         (vio->mmio_gla == (addr & PAGE_MASK)) )
> +    if ( known_gla(addr, bytes, pfec) )
>          return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
> hvmemul_ctxt, 1);
> 
>      mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
> @@ -1218,7 +1230,6 @@ static int hvmemul_rmw(
>          container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
>      unsigned long addr, reps = 1;
>      uint32_t pfec = PFEC_page_present | PFEC_write_access;
> -    struct hvm_vcpu_io *vio = &current->arch.hvm.hvm_io;
>      int rc;
>      void *mapping;
> 
> @@ -1244,8 +1255,7 @@ static int hvmemul_rmw(
>      else
>      {
>          unsigned long data = 0;
> -        bool known_gpfn = vio->mmio_access.write_access &&
> -                          vio->mmio_gla == (addr & PAGE_MASK);
> +        bool known_gpfn = known_gla(addr, bytes, pfec);
> 
>          if ( bytes > sizeof(data) )
>              return X86EMUL_UNHANDLEABLE;
> 
> 
> 


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH v3 3/3] x86/HVM: split page straddling emulated accesses in more cases
  2018-09-06 13:04   ` [PATCH v3 3/3] x86/HVM: split page straddling emulated accesses in more cases Jan Beulich
@ 2018-09-06 13:14     ` Paul Durrant
  0 siblings, 0 replies; 28+ messages in thread
From: Paul Durrant @ 2018-09-06 13:14 UTC (permalink / raw)
  To: 'Jan Beulich', xen-devel; +Cc: Andrew Cooper, Olaf Hering

> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 06 September 2018 14:04
> To: xen-devel <xen-devel@lists.xenproject.org>
> Cc: Olaf Hering <olaf@aepfle.de>; Andrew Cooper
> <Andrew.Cooper3@citrix.com>; Paul Durrant <Paul.Durrant@citrix.com>
> Subject: [PATCH v3 3/3] x86/HVM: split page straddling emulated accesses in
> more cases
> 
> Assuming consecutive linear addresses map to all RAM or all MMIO is not
> correct. Nor is assuming that a page straddling MMIO access will access
> the same emulating component for both parts of the access. If a guest
> RAM read fails with HVMTRANS_bad_gfn_to_mfn and if the access straddles
> a page boundary, issue accesses separately for both parts.
> 
> The extra call to known_gla() from hvmemul_write() is just to preserve
> original behavior; for consistency the check also gets added to
> hvmemul_rmw() (albeit I continue to be unsure whether we wouldn't better
> drop both).
> 
> Note that the correctness of this depends on the MMIO caching used
> elsewhere in the emulation code.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> Tested-by: Olaf Hering <olaf@aepfle.de>

Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

> ---
> v3: Move introduction of known_gla() to a prereq patch. Mirror check
>     using the function into hvmemul_rmw().
> v2: Also handle hvmemul_{write,rmw}().
> 
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -1058,7 +1058,91 @@ static bool known_gla(unsigned long addr
>      else if ( !vio->mmio_access.read_access )
>              return false;
> 
> -    return vio->mmio_gla == (addr & PAGE_MASK);
> +    return (vio->mmio_gla == (addr & PAGE_MASK) &&
> +            (addr & ~PAGE_MASK) + bytes <= PAGE_SIZE);
> +}
> +
> +static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
> +                       uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
> +{
> +    pagefault_info_t pfinfo;
> +    int rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec,
> &pfinfo);
> +
> +    switch ( rc )
> +    {
> +        unsigned int offset, part1;
> +
> +    case HVMTRANS_okay:
> +        return X86EMUL_OKAY;
> +
> +    case HVMTRANS_bad_linear_to_gfn:
> +        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
> +        return X86EMUL_EXCEPTION;
> +
> +    case HVMTRANS_bad_gfn_to_mfn:
> +        if ( pfec & PFEC_insn_fetch )
> +            return X86EMUL_UNHANDLEABLE;
> +
> +        offset = addr & ~PAGE_MASK;
> +        if ( offset + bytes <= PAGE_SIZE )
> +            return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
> +                                            hvmemul_ctxt,
> +                                            known_gla(addr, bytes, pfec));
> +
> +        /* Split the access at the page boundary. */
> +        part1 = PAGE_SIZE - offset;
> +        rc = linear_read(addr, part1, p_data, pfec, hvmemul_ctxt);
> +        if ( rc == X86EMUL_OKAY )
> +            rc = linear_read(addr + part1, bytes - part1, p_data + part1,
> +                             pfec, hvmemul_ctxt);
> +        return rc;
> +
> +    case HVMTRANS_gfn_paged_out:
> +    case HVMTRANS_gfn_shared:
> +        return X86EMUL_RETRY;
> +    }
> +
> +    return X86EMUL_UNHANDLEABLE;
> +}
> +
> +static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
> +                        uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
> +{
> +    pagefault_info_t pfinfo;
> +    int rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
> +
> +    switch ( rc )
> +    {
> +        unsigned int offset, part1;
> +
> +    case HVMTRANS_okay:
> +        return X86EMUL_OKAY;
> +
> +    case HVMTRANS_bad_linear_to_gfn:
> +        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
> +        return X86EMUL_EXCEPTION;
> +
> +    case HVMTRANS_bad_gfn_to_mfn:
> +        offset = addr & ~PAGE_MASK;
> +        if ( offset + bytes <= PAGE_SIZE )
> +            return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
> +                                             hvmemul_ctxt,
> +                                             known_gla(addr, bytes, pfec));
> +
> +        /* Split the access at the page boundary. */
> +        part1 = PAGE_SIZE - offset;
> +        rc = linear_write(addr, part1, p_data, pfec, hvmemul_ctxt);
> +        if ( rc == X86EMUL_OKAY )
> +            rc = linear_write(addr + part1, bytes - part1, p_data + part1,
> +                              pfec, hvmemul_ctxt);
> +        return rc;
> +
> +    case HVMTRANS_gfn_paged_out:
> +    case HVMTRANS_gfn_shared:
> +        return X86EMUL_RETRY;
> +    }
> +
> +    return X86EMUL_UNHANDLEABLE;
>  }
> 
>  static int __hvmemul_read(
> @@ -1069,7 +1153,6 @@ static int __hvmemul_read(
>      enum hvm_access_type access_type,
>      struct hvm_emulate_ctxt *hvmemul_ctxt)
>  {
> -    pagefault_info_t pfinfo;
>      unsigned long addr, reps = 1;
>      uint32_t pfec = PFEC_page_present;
>      int rc;
> @@ -1085,31 +1168,8 @@ static int __hvmemul_read(
>          seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
>      if ( rc != X86EMUL_OKAY || !bytes )
>          return rc;
> -    if ( known_gla(addr, bytes, pfec) )
> -        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
> hvmemul_ctxt, 1);
> 
> -    rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
> -
> -    switch ( rc )
> -    {
> -    case HVMTRANS_okay:
> -        break;
> -    case HVMTRANS_bad_linear_to_gfn:
> -        x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
> -        return X86EMUL_EXCEPTION;
> -    case HVMTRANS_bad_gfn_to_mfn:
> -        if ( access_type == hvm_access_insn_fetch )
> -            return X86EMUL_UNHANDLEABLE;
> -
> -        return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
> hvmemul_ctxt, 0);
> -    case HVMTRANS_gfn_paged_out:
> -    case HVMTRANS_gfn_shared:
> -        return X86EMUL_RETRY;
> -    default:
> -        return X86EMUL_UNHANDLEABLE;
> -    }
> -
> -    return X86EMUL_OKAY;
> +    return linear_read(addr, bytes, p_data, pfec, hvmemul_ctxt);
>  }
> 
>  static int hvmemul_read(
> @@ -1189,7 +1249,7 @@ static int hvmemul_write(
>      unsigned long addr, reps = 1;
>      uint32_t pfec = PFEC_page_present | PFEC_write_access;
>      int rc;
> -    void *mapping;
> +    void *mapping = NULL;
> 
>      if ( is_x86_system_segment(seg) )
>          pfec |= PFEC_implicit;
> @@ -1201,15 +1261,15 @@ static int hvmemul_write(
>      if ( rc != X86EMUL_OKAY || !bytes )
>          return rc;
> 
> -    if ( known_gla(addr, bytes, pfec) )
> -        return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
> hvmemul_ctxt, 1);
> -
> -    mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
> -    if ( IS_ERR(mapping) )
> -        return ~PTR_ERR(mapping);
> +    if ( !known_gla(addr, bytes, pfec) )
> +    {
> +        mapping = hvmemul_map_linear_addr(addr, bytes, pfec,
> hvmemul_ctxt);
> +        if ( IS_ERR(mapping) )
> +             return ~PTR_ERR(mapping);
> +    }
> 
>      if ( !mapping )
> -        return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
> hvmemul_ctxt, 0);
> +        return linear_write(addr, bytes, p_data, pfec, hvmemul_ctxt);
> 
>      memcpy(mapping, p_data, bytes);
> 
> @@ -1231,7 +1291,7 @@ static int hvmemul_rmw(
>      unsigned long addr, reps = 1;
>      uint32_t pfec = PFEC_page_present | PFEC_write_access;
>      int rc;
> -    void *mapping;
> +    void *mapping = NULL;
> 
>      rc = hvmemul_virtual_to_linear(
>          seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
> @@ -1243,9 +1303,12 @@ static int hvmemul_rmw(
>      else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
>          pfec |= PFEC_user_mode;
> 
> -    mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
> -    if ( IS_ERR(mapping) )
> -        return ~PTR_ERR(mapping);
> +    if ( !known_gla(addr, bytes, pfec) )
> +    {
> +        mapping = hvmemul_map_linear_addr(addr, bytes, pfec,
> hvmemul_ctxt);
> +        if ( IS_ERR(mapping) )
> +            return ~PTR_ERR(mapping);
> +    }
> 
>      if ( mapping )
>      {
> @@ -1255,17 +1318,14 @@ static int hvmemul_rmw(
>      else
>      {
>          unsigned long data = 0;
> -        bool known_gpfn = known_gla(addr, bytes, pfec);
> 
>          if ( bytes > sizeof(data) )
>              return X86EMUL_UNHANDLEABLE;
> -        rc = hvmemul_linear_mmio_read(addr, bytes, &data, pfec,
> hvmemul_ctxt,
> -                                      known_gpfn);
> +        rc = linear_read(addr, bytes, &data, pfec, hvmemul_ctxt);
>          if ( rc == X86EMUL_OKAY )
>              rc = x86_emul_rmw(&data, bytes, eflags, state, ctxt);
>          if ( rc == X86EMUL_OKAY )
> -            rc = hvmemul_linear_mmio_write(addr, bytes, &data, pfec,
> -                                           hvmemul_ctxt, known_gpfn);
> +            rc = linear_write(addr, bytes, &data, pfec, hvmemul_ctxt);
>      }
> 
>      return rc;
> 
> 


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH v3 2/3] x86/HVM: add known_gla() emulation helper
  2018-09-06 13:12     ` Paul Durrant
@ 2018-09-06 13:22       ` Jan Beulich
  2018-09-06 14:50         ` Paul Durrant
  0 siblings, 1 reply; 28+ messages in thread
From: Jan Beulich @ 2018-09-06 13:22 UTC (permalink / raw)
  To: Paul Durrant; +Cc: Andrew Cooper, Olaf Hering, xen-devel

>>> On 06.09.18 at 15:12, <Paul.Durrant@citrix.com> wrote:
>>  -----Original Message-----
>> From: Jan Beulich [mailto:JBeulich@suse.com]
>> Sent: 06 September 2018 14:04
>> To: xen-devel <xen-devel@lists.xenproject.org>
>> Cc: Olaf Hering <olaf@aepfle.de>; Andrew Cooper
>> <Andrew.Cooper3@citrix.com>; Paul Durrant <Paul.Durrant@citrix.com>
>> Subject: [PATCH v3 2/3] x86/HVM: add known_gla() emulation helper
>> 
>> ... as a central place to check whether the translation for the linear
>> address is available as well as usable.
>> 
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>> ---
>> v3: Split from subsequent patch.
> 
> Much nicer :-)
> 
> Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

Thanks; any chance of also getting an ack for patch 1?

Jan



_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH v3 2/3] x86/HVM: add known_gla() emulation helper
  2018-09-06 13:22       ` Jan Beulich
@ 2018-09-06 14:50         ` Paul Durrant
  0 siblings, 0 replies; 28+ messages in thread
From: Paul Durrant @ 2018-09-06 14:50 UTC (permalink / raw)
  To: 'Jan Beulich'; +Cc: Andrew Cooper, Olaf Hering, xen-devel

> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 06 September 2018 14:22
> To: Paul Durrant <Paul.Durrant@citrix.com>
> Cc: Olaf Hering <olaf@aepfle.de>; Andrew Cooper
> <Andrew.Cooper3@citrix.com>; xen-devel <xen-
> devel@lists.xenproject.org>
> Subject: RE: [PATCH v3 2/3] x86/HVM: add known_gla() emulation helper
> 
> >>> On 06.09.18 at 15:12, <Paul.Durrant@citrix.com> wrote:
> >>  -----Original Message-----
> >> From: Jan Beulich [mailto:JBeulich@suse.com]
> >> Sent: 06 September 2018 14:04
> >> To: xen-devel <xen-devel@lists.xenproject.org>
> >> Cc: Olaf Hering <olaf@aepfle.de>; Andrew Cooper
> >> <Andrew.Cooper3@citrix.com>; Paul Durrant <Paul.Durrant@citrix.com>
> >> Subject: [PATCH v3 2/3] x86/HVM: add known_gla() emulation helper
> >>
> >> ... as a central place to do respective checking for whether the
> >> translation for the linear address is available as well as usable.
> >>
> >> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> >> ---
> >> v3: Split from subsequent patch.
> >
> > Much nicer :-)
> >
> > Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
> 
> Thanks; any chance of also getting an ack for patch 1?
> 

Oh sorry, I thought I already did... Coming up in a moment...

  Paul

> Jan
> 


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH v3 1/3] x86/HVM: drop hvm_fetch_from_guest_linear()
  2018-09-06 13:03   ` [PATCH v3 1/3] x86/HVM: drop hvm_fetch_from_guest_linear() Jan Beulich
@ 2018-09-06 14:51     ` Paul Durrant
  0 siblings, 0 replies; 28+ messages in thread
From: Paul Durrant @ 2018-09-06 14:51 UTC (permalink / raw)
  To: 'Jan Beulich', xen-devel
  Cc: Andrew Cooper, Olaf Hering, Tim (Xen.org)

> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@suse.com]
> Sent: 06 September 2018 14:03
> To: xen-devel <xen-devel@lists.xenproject.org>
> Cc: Olaf Hering <olaf@aepfle.de>; Andrew Cooper
> <Andrew.Cooper3@citrix.com>; Paul Durrant <Paul.Durrant@citrix.com>;
> Tim (Xen.org) <tim@xen.org>
> Subject: [PATCH v3 1/3] x86/HVM: drop hvm_fetch_from_guest_linear()
> 
> It can easily be expressed through hvm_copy_from_guest_linear(), and in
> two cases this even simplifies callers.
> 
> Suggested-by: Paul Durrant <paul.durrant@citrix.com>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
> Tested-by: Olaf Hering <olaf@aepfle.de>

Reviewed-by: Paul Durrant <paul.durrant@citrix.com>

> ---
> v2: Make sure this compiles standalone. Slightly adjust change in
>     hvm_ud_intercept().
> 
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -1060,6 +1060,8 @@ static int __hvmemul_read(
>          pfec |= PFEC_implicit;
>      else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
>          pfec |= PFEC_user_mode;
> +    if ( access_type == hvm_access_insn_fetch )
> +        pfec |= PFEC_insn_fetch;
> 
>      rc = hvmemul_virtual_to_linear(
>          seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
> @@ -1071,9 +1073,7 @@ static int __hvmemul_read(
>           (vio->mmio_gla == (addr & PAGE_MASK)) )
>          return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
> 
> -    rc = ((access_type == hvm_access_insn_fetch) ?
> -          hvm_fetch_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo) :
> -          hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo));
> +    rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
> 
>      switch ( rc )
>      {
> @@ -2512,9 +2512,10 @@ void hvm_emulate_init_per_insn(
>                                          hvm_access_insn_fetch,
>                                          &hvmemul_ctxt->seg_reg[x86_seg_cs],
>                                          &addr) &&
> -             hvm_fetch_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
> -                                         sizeof(hvmemul_ctxt->insn_buf),
> -                                         pfec, NULL) == HVMTRANS_okay) ?
> +             hvm_copy_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
> +                                        sizeof(hvmemul_ctxt->insn_buf),
> +                                        pfec | PFEC_insn_fetch,
> +                                        NULL) == HVMTRANS_okay) ?
>              sizeof(hvmemul_ctxt->insn_buf) : 0;
>      }
>      else
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -3286,15 +3286,6 @@ enum hvm_translation_result hvm_copy_fro
>                        PFEC_page_present | pfec, pfinfo);
>  }
> 
> -enum hvm_translation_result hvm_fetch_from_guest_linear(
> -    void *buf, unsigned long addr, int size, uint32_t pfec,
> -    pagefault_info_t *pfinfo)
> -{
> -    return __hvm_copy(buf, addr, size, current,
> -                      HVMCOPY_from_guest | HVMCOPY_linear,
> -                      PFEC_page_present | PFEC_insn_fetch | pfec, pfinfo);
> -}
> -
>  unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
>  {
>      int rc;
> @@ -3740,16 +3731,16 @@ void hvm_ud_intercept(struct cpu_user_re
>      if ( opt_hvm_fep )
>      {
>          const struct segment_register *cs = &ctxt.seg_reg[x86_seg_cs];
> -        uint32_t walk = (ctxt.seg_reg[x86_seg_ss].dpl == 3)
> -            ? PFEC_user_mode : 0;
> +        uint32_t walk = ((ctxt.seg_reg[x86_seg_ss].dpl == 3)
> +                         ? PFEC_user_mode : 0) | PFEC_insn_fetch;
>          unsigned long addr;
>          char sig[5]; /* ud2; .ascii "xen" */
> 
>          if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
>                                          sizeof(sig), hvm_access_insn_fetch,
>                                          cs, &addr) &&
> -             (hvm_fetch_from_guest_linear(sig, addr, sizeof(sig),
> -                                          walk, NULL) == HVMTRANS_okay) &&
> +             (hvm_copy_from_guest_linear(sig, addr, sizeof(sig),
> +                                         walk, NULL) == HVMTRANS_okay) &&
>               (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
>          {
>              regs->rip += sizeof(sig);
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -164,8 +164,9 @@ const struct x86_emulate_ops *shadow_ini
>          (!hvm_translate_virtual_addr(
>              x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
>              hvm_access_insn_fetch, sh_ctxt, &addr) &&
> -         !hvm_fetch_from_guest_linear(
> -             sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
> +         !hvm_copy_from_guest_linear(
> +             sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf),
> +             PFEC_insn_fetch, NULL))
>          ? sizeof(sh_ctxt->insn_buf) : 0;
> 
>      return &hvm_shadow_emulator_ops;
> @@ -198,8 +199,9 @@ void shadow_continue_emulation(struct sh
>              (!hvm_translate_virtual_addr(
>                  x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
>                  hvm_access_insn_fetch, sh_ctxt, &addr) &&
> -             !hvm_fetch_from_guest_linear(
> -                 sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
> +             !hvm_copy_from_guest_linear(
> +                 sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf),
> +                 PFEC_insn_fetch, NULL))
>              ? sizeof(sh_ctxt->insn_buf) : 0;
>          sh_ctxt->insn_buf_eip = regs->rip;
>      }
> --- a/xen/arch/x86/mm/shadow/hvm.c
> +++ b/xen/arch/x86/mm/shadow/hvm.c
> @@ -122,10 +122,10 @@ hvm_read(enum x86_segment seg,
>      if ( rc || !bytes )
>          return rc;
> 
> -    if ( access_type == hvm_access_insn_fetch )
> -        rc = hvm_fetch_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
> -    else
> -        rc = hvm_copy_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
> +    rc = hvm_copy_from_guest_linear(p_data, addr, bytes,
> +                                    (access_type == hvm_access_insn_fetch
> +                                     ? PFEC_insn_fetch : 0),
> +                                    &pfinfo);
> 
>      switch ( rc )
>      {
> --- a/xen/include/asm-x86/hvm/support.h
> +++ b/xen/include/asm-x86/hvm/support.h
> @@ -100,9 +100,6 @@ enum hvm_translation_result hvm_copy_to_
>  enum hvm_translation_result hvm_copy_from_guest_linear(
>      void *buf, unsigned long addr, int size, uint32_t pfec,
>      pagefault_info_t *pfinfo);
> -enum hvm_translation_result hvm_fetch_from_guest_linear(
> -    void *buf, unsigned long addr, int size, uint32_t pfec,
> -    pagefault_info_t *pfinfo);
> 
>  /*
>   * Get a reference on the page under an HVM physical or linear address.  If
> 
> 
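
In short, the calling convention after this change (illustrative
fragment only, not part of the patch): each former wrapper call becomes
a plain copy call with the insn-fetch bit merged in by the caller.

    /* Before: the wrapper ORed PFEC_insn_fetch in internally. */
    rc = hvm_fetch_from_guest_linear(buf, addr, size, pfec, &pfinfo);

    /* After: the caller passes the bit explicitly. */
    rc = hvm_copy_from_guest_linear(buf, addr, size,
                                    pfec | PFEC_insn_fetch, &pfinfo);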



^ permalink raw reply	[flat|nested] 28+ messages in thread

end of thread, other threads:[~2018-09-06 14:51 UTC | newest]

Thread overview: 28+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-08-30 11:05 [PATCH 0/2] x86/HVM: emulation adjustments Jan Beulich
2018-08-30 11:09 ` [PATCH 1/2] x86/HVM: drop hvm_fetch_from_guest_linear() Jan Beulich
2018-08-30 11:18   ` Andrew Cooper
2018-08-30 12:02     ` Jan Beulich
2018-08-30 12:22       ` Andrew Cooper
2018-08-30 12:28         ` Jan Beulich
2018-08-30 11:24   ` Andrew Cooper
2018-08-30 11:09 ` [PATCH RFC 2/2] x86/HVM: split page straddling emulated accesses in more cases Jan Beulich
2018-09-03  9:11   ` Paul Durrant
2018-09-03 15:41 ` [PATCH v2 0/2] x86/HVM: emulation adjustments Jan Beulich
2018-09-03 15:43   ` [PATCH v2 1/2] x86/HVM: drop hvm_fetch_from_guest_linear() Jan Beulich
2018-09-04 10:01     ` Jan Beulich
2018-09-05  7:20     ` Olaf Hering
2018-09-03 15:44   ` [PATCH v2 2/2] x86/HVM: split page straddling emulated accesses in more cases Jan Beulich
2018-09-03 16:15     ` Paul Durrant
2018-09-04  7:43       ` Jan Beulich
2018-09-04  8:15         ` Paul Durrant
2018-09-04  8:46           ` Jan Beulich
2018-09-05  7:22     ` Olaf Hering
2018-09-06 12:58 ` [PATCH v3 0/3] x86/HVM: emulation adjustments Jan Beulich
2018-09-06 13:03   ` [PATCH v3 1/3] x86/HVM: drop hvm_fetch_from_guest_linear() Jan Beulich
2018-09-06 14:51     ` Paul Durrant
2018-09-06 13:03   ` [PATCH v3 2/3] x86/HVM: add known_gla() emulation helper Jan Beulich
2018-09-06 13:12     ` Paul Durrant
2018-09-06 13:22       ` Jan Beulich
2018-09-06 14:50         ` Paul Durrant
2018-09-06 13:04   ` [PATCH v3 3/3] x86/HVM: split page straddling emulated accesses in more cases Jan Beulich
2018-09-06 13:14     ` Paul Durrant
