From: Alexandru Stefan ISAILA <aisaila@bitdefender.com>
To: "xen-devel@lists.xenproject.org" <xen-devel@lists.xenproject.org>
Cc: "tamas@tklengyel.com" <tamas@tklengyel.com>,
	"wei.liu2@citrix.com" <wei.liu2@citrix.com>,
	"rcojocaru@bitdefender.com" <rcojocaru@bitdefender.com>,
	"george.dunlap@eu.citrix.com" <george.dunlap@eu.citrix.com>,
	"andrew.cooper3@citrix.com" <andrew.cooper3@citrix.com>,
	"paul.durrant@citrix.com" <paul.durrant@citrix.com>,
	"jbeulich@suse.com" <jbeulich@suse.com>,
	Alexandru Stefan ISAILA <aisaila@bitdefender.com>,
	"roger.pau@citrix.com" <roger.pau@citrix.com>
Subject: [PATCH v4 1/2] x86/emulate: Move hvmemul_linear_to_phys
Date: Mon, 20 May 2019 12:55:10 +0000
Message-ID: <20190520125454.14805-1-aisaila@bitdefender.com>

This is done so that hvmemul_linear_to_phys() can be called from
hvmemul_send_vm_event(), which is introduced in the next patch of
this series.

Signed-off-by: Alexandru Isaila <aisaila@bitdefender.com>
---
 xen/arch/x86/hvm/emulate.c | 181 ++++++++++++++++++-------------------
 1 file changed, 90 insertions(+), 91 deletions(-)
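
Note for reviewers (not part of the patch): below is a rough sketch of how
the relocated helper is expected to be consumed. The caller's name is taken
from patch 2 of this series; its signature and body are assumptions made
purely for illustration.

/*
 * Illustrative sketch only -- not part of this patch.  The caller's name
 * comes from patch 2 of this series; its signature and body here are
 * assumptions made for illustration.
 */
static bool hvmemul_send_vm_event(unsigned long gla, uint32_t pfec,
                                  unsigned int bytes,
                                  struct hvm_emulate_ctxt *ctxt)
{
    unsigned long reps = 1;
    paddr_t gpa;

    /*
     * hvmemul_linear_to_phys() needs to be visible above this caller
     * without adding a forward declaration, hence moving it earlier
     * in emulate.c.
     */
    if ( hvmemul_linear_to_phys(gla, &gpa, bytes, &reps, pfec, ctxt) !=
         X86EMUL_OKAY )
        return false;

    /* Build and send the vm_event describing the access to gpa here. */
    return true;
}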

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 8659c89862..254ff6515d 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -530,6 +530,95 @@ static int hvmemul_do_mmio_addr(paddr_t mmio_gpa,
     return hvmemul_do_io_addr(1, mmio_gpa, reps, size, dir, df, ram_gpa);
 }
 
+/*
+ * Convert addr from linear to physical form, valid over the range
+ * [addr, addr + *reps * bytes_per_rep]. *reps is adjusted according to
+ * the valid computed range. It is always >0 when X86EMUL_OKAY is returned.
+ * @pfec indicates the access checks to be performed during page-table walks.
+ */
+static int hvmemul_linear_to_phys(
+    unsigned long addr,
+    paddr_t *paddr,
+    unsigned int bytes_per_rep,
+    unsigned long *reps,
+    uint32_t pfec,
+    struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+    struct vcpu *curr = current;
+    unsigned long pfn, npfn, done, todo, i, offset = addr & ~PAGE_MASK;
+    int reverse;
+
+    /*
+     * Clip repetitions to a sensible maximum. This avoids extensive looping in
+     * this function while still amortising the cost of I/O trap-and-emulate.
+     */
+    *reps = min_t(unsigned long, *reps, 4096);
+
+    /* With no paging it's easy: linear == physical. */
+    if ( !(curr->arch.hvm.guest_cr[0] & X86_CR0_PG) )
+    {
+        *paddr = addr;
+        return X86EMUL_OKAY;
+    }
+
+    /* Reverse mode if this is a backwards multi-iteration string operation. */
+    reverse = (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1);
+
+    if ( reverse && ((PAGE_SIZE - offset) < bytes_per_rep) )
+    {
+        /* Do page-straddling first iteration forwards via recursion. */
+        paddr_t _paddr;
+        unsigned long one_rep = 1;
+        int rc = hvmemul_linear_to_phys(
+            addr, &_paddr, bytes_per_rep, &one_rep, pfec, hvmemul_ctxt);
+        if ( rc != X86EMUL_OKAY )
+            return rc;
+        pfn = _paddr >> PAGE_SHIFT;
+    }
+    else if ( (pfn = paging_gva_to_gfn(curr, addr, &pfec)) == gfn_x(INVALID_GFN) )
+    {
+        if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
+            return X86EMUL_RETRY;
+        *reps = 0;
+        x86_emul_pagefault(pfec, addr, &hvmemul_ctxt->ctxt);
+        return X86EMUL_EXCEPTION;
+    }
+
+    done = reverse ? bytes_per_rep + offset : PAGE_SIZE - offset;
+    todo = *reps * bytes_per_rep;
+    for ( i = 1; done < todo; i++ )
+    {
+        /* Get the next PFN in the range. */
+        addr += reverse ? -PAGE_SIZE : PAGE_SIZE;
+        npfn = paging_gva_to_gfn(curr, addr, &pfec);
+
+        /* Is it contiguous with the preceding PFNs? If not then we're done. */
+        if ( (npfn == gfn_x(INVALID_GFN)) ||
+             (npfn != (pfn + (reverse ? -i : i))) )
+        {
+            if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
+                return X86EMUL_RETRY;
+            done /= bytes_per_rep;
+            if ( done == 0 )
+            {
+                ASSERT(!reverse);
+                if ( npfn != gfn_x(INVALID_GFN) )
+                    return X86EMUL_UNHANDLEABLE;
+                *reps = 0;
+                x86_emul_pagefault(pfec, addr & PAGE_MASK, &hvmemul_ctxt->ctxt);
+                return X86EMUL_EXCEPTION;
+            }
+            *reps = done;
+            break;
+        }
+
+        done += PAGE_SIZE;
+    }
+
+    *paddr = ((paddr_t)pfn << PAGE_SHIFT) | offset;
+    return X86EMUL_OKAY;
+}
+
 /*
  * Map the frame(s) covering an individual linear access, for writeable
  * access.  May return NULL for MMIO, or ERR_PTR(~X86EMUL_*) for other errors
@@ -692,97 +781,7 @@ static void hvmemul_unmap_linear_addr(
         *mfn++ = _mfn(0);
     }
 #endif
-}
-
-/*
- * Convert addr from linear to physical form, valid over the range
- * [addr, addr + *reps * bytes_per_rep]. *reps is adjusted according to
- * the valid computed range. It is always >0 when X86EMUL_OKAY is returned.
- * @pfec indicates the access checks to be performed during page-table walks.
- */
-static int hvmemul_linear_to_phys(
-    unsigned long addr,
-    paddr_t *paddr,
-    unsigned int bytes_per_rep,
-    unsigned long *reps,
-    uint32_t pfec,
-    struct hvm_emulate_ctxt *hvmemul_ctxt)
-{
-    struct vcpu *curr = current;
-    unsigned long pfn, npfn, done, todo, i, offset = addr & ~PAGE_MASK;
-    int reverse;
-
-    /*
-     * Clip repetitions to a sensible maximum. This avoids extensive looping in
-     * this function while still amortising the cost of I/O trap-and-emulate.
-     */
-    *reps = min_t(unsigned long, *reps, 4096);
-
-    /* With no paging it's easy: linear == physical. */
-    if ( !(curr->arch.hvm.guest_cr[0] & X86_CR0_PG) )
-    {
-        *paddr = addr;
-        return X86EMUL_OKAY;
-    }
-
-    /* Reverse mode if this is a backwards multi-iteration string operation. */
-    reverse = (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1);
-
-    if ( reverse && ((PAGE_SIZE - offset) < bytes_per_rep) )
-    {
-        /* Do page-straddling first iteration forwards via recursion. */
-        paddr_t _paddr;
-        unsigned long one_rep = 1;
-        int rc = hvmemul_linear_to_phys(
-            addr, &_paddr, bytes_per_rep, &one_rep, pfec, hvmemul_ctxt);
-        if ( rc != X86EMUL_OKAY )
-            return rc;
-        pfn = _paddr >> PAGE_SHIFT;
-    }
-    else if ( (pfn = paging_gva_to_gfn(curr, addr, &pfec)) == gfn_x(INVALID_GFN) )
-    {
-        if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
-            return X86EMUL_RETRY;
-        *reps = 0;
-        x86_emul_pagefault(pfec, addr, &hvmemul_ctxt->ctxt);
-        return X86EMUL_EXCEPTION;
-    }
-
-    done = reverse ? bytes_per_rep + offset : PAGE_SIZE - offset;
-    todo = *reps * bytes_per_rep;
-    for ( i = 1; done < todo; i++ )
-    {
-        /* Get the next PFN in the range. */
-        addr += reverse ? -PAGE_SIZE : PAGE_SIZE;
-        npfn = paging_gva_to_gfn(curr, addr, &pfec);
-
-        /* Is it contiguous with the preceding PFNs? If not then we're done. */
-        if ( (npfn == gfn_x(INVALID_GFN)) ||
-             (npfn != (pfn + (reverse ? -i : i))) )
-        {
-            if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
-                return X86EMUL_RETRY;
-            done /= bytes_per_rep;
-            if ( done == 0 )
-            {
-                ASSERT(!reverse);
-                if ( npfn != gfn_x(INVALID_GFN) )
-                    return X86EMUL_UNHANDLEABLE;
-                *reps = 0;
-                x86_emul_pagefault(pfec, addr & PAGE_MASK, &hvmemul_ctxt->ctxt);
-                return X86EMUL_EXCEPTION;
-            }
-            *reps = done;
-            break;
-        }
-
-        done += PAGE_SIZE;
-    }
-
-    *paddr = ((paddr_t)pfn << PAGE_SHIFT) | offset;
-    return X86EMUL_OKAY;
-}
-    
+}  
 
 static int hvmemul_virtual_to_linear(
     enum x86_segment seg,
-- 
2.17.1

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel
