From: Julien Grall <julien.grall@linaro.org>
To: xen-devel@lists.xen.org
Cc: sstabellini@kernel.org, Julien Grall <julien.grall@linaro.org>,
	andre.przywara@linaro.org
Subject: [v2 06/16] xen/arm: Extend copy_to_guest to support copying from/to guest physical address
Date: Tue, 12 Dec 2017 19:02:02 +0000	[thread overview]
Message-ID: <20171212190212.5535-7-julien.grall@linaro.org> (raw)
In-Reply-To: <20171212190212.5535-1-julien.grall@linaro.org>

The only differences between copy_to_guest and access_guest_memory_by_ipa are:
    - The latter does not support copying data crossing a page boundary.
    - The former copies from/to a guest virtual address whilst the latter
    uses a guest physical address.

copy_to_guest can easily be extended to support copying from/to a guest
physical address. For that, a new bit is used to tell whether a linear
address or an IPA is being used.
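
For illustration (this simply mirrors the hunks below), the new bit selects
which translation path copy_guest takes:

    /* Copy to a guest linear (virtual) address, as raw_copy_to_guest does: */
    copy_guest((void *)from, (vaddr_t)to, len, GVA_INFO(current),
               COPY_to_guest | COPY_linear);

    /* Copy to a guest physical address (IPA): */
    copy_guest(buf, gpa, size, GPA_INFO(d), COPY_to_guest | COPY_ipa);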

Lastly, access_guest_memory_by_ipa is reimplemented using the common
copy_guest helper. This also extends its capabilities: it is now possible
to copy data crossing a page boundary.
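
As a sketch of that benefit (the domain pointer d, the address gpa and the
buffer size below are made-up values, purely for illustration), a single
call can now span a page boundary:

    uint8_t data[16];
    /* Starts 8 bytes before the end of a page, so the copy crosses into
     * the next page; previously this returned -EINVAL. */
    paddr_t addr = (gpa & PAGE_MASK) + PAGE_SIZE - 8;
    int rc = access_guest_memory_by_ipa(d, addr, data, sizeof(data),
                                        false /* read from the guest */);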

Signed-off-by: Julien Grall <julien.grall@linaro.org>

---
    Changes in v2:
        - Rework the patch after the interface changes in the previous
        patch.
        - Use uint64_t rather than paddr_t in translate_get_page
        - Add a BUILD_BUG_ON to check whether paddr_t fits in uint64_t
---
 xen/arch/arm/guestcopy.c | 91 +++++++++++++++++++++++-------------------------
 1 file changed, 44 insertions(+), 47 deletions(-)

diff --git a/xen/arch/arm/guestcopy.c b/xen/arch/arm/guestcopy.c
index 7e92e27beb..93e4aa2d3f 100644
--- a/xen/arch/arm/guestcopy.c
+++ b/xen/arch/arm/guestcopy.c
@@ -8,6 +8,8 @@
 #define COPY_flush_dcache   (1U << 0)
 #define COPY_from_guest     (0U << 1)
 #define COPY_to_guest       (1U << 1)
+#define COPY_ipa            (0U << 2)
+#define COPY_linear         (1U << 2)
 
 typedef union
 {
@@ -15,9 +17,39 @@ typedef union
     {
         struct vcpu *v;
     } gva;
+
+    struct
+    {
+        struct domain *d;
+    } gpa;
 } copy_info_t;
 
 #define GVA_INFO(vcpu) ((copy_info_t) { .gva = { vcpu } })
+#define GPA_INFO(domain) ((copy_info_t) { .gpa = { domain } })
+
+static struct page_info *translate_get_page(copy_info_t info, uint64_t addr,
+                                            bool linear, bool write)
+{
+    p2m_type_t p2mt;
+    struct page_info *page;
+
+    if ( linear )
+        return get_page_from_gva(info.gva.v, addr,
+                                 write ? GV2M_WRITE : GV2M_READ);
+
+    page = get_page_from_gfn(info.gpa.d, paddr_to_pfn(addr), &p2mt, P2M_ALLOC);
+
+    if ( !page )
+        return NULL;
+
+    if ( !p2m_is_ram(p2mt) )
+    {
+        put_page(page);
+        return NULL;
+    }
+
+    return page;
+}
 
 static unsigned long copy_guest(void *buf, uint64_t addr, unsigned int len,
                                 copy_info_t info, unsigned int flags)
@@ -26,6 +58,7 @@ static unsigned long copy_guest(void *buf, uint64_t addr, unsigned int len,
     unsigned offset = addr & ~PAGE_MASK;
 
     BUILD_BUG_ON((sizeof(addr)) < sizeof(vaddr_t));
+    BUILD_BUG_ON((sizeof(addr)) < sizeof(paddr_t));
 
     while ( len )
     {
@@ -33,8 +66,8 @@ static unsigned long copy_guest(void *buf, uint64_t addr, unsigned int len,
         unsigned size = min(len, (unsigned)PAGE_SIZE - offset);
         struct page_info *page;
 
-        page = get_page_from_gva(info.gva.v, addr,
-                                 (flags & COPY_to_guest) ? GV2M_WRITE : GV2M_READ);
+        page = translate_get_page(info, addr, flags & COPY_linear,
+                                  flags & COPY_to_guest);
         if ( page == NULL )
             return len;
 
@@ -75,75 +108,39 @@ static unsigned long copy_guest(void *buf, uint64_t addr, unsigned int len,
 unsigned long raw_copy_to_guest(void *to, const void *from, unsigned len)
 {
     return copy_guest((void *)from, (vaddr_t)to, len,
-                      GVA_INFO(current), COPY_to_guest);
+                      GVA_INFO(current), COPY_to_guest | COPY_linear);
 }
 
 unsigned long raw_copy_to_guest_flush_dcache(void *to, const void *from,
                                              unsigned len)
 {
     return copy_guest((void *)from, (vaddr_t)to, len, GVA_INFO(current),
-                      COPY_to_guest | COPY_flush_dcache);
+                      COPY_to_guest | COPY_flush_dcache | COPY_linear);
 }
 
 unsigned long raw_clear_guest(void *to, unsigned len)
 {
     return copy_guest(NULL, (vaddr_t)to, len, GVA_INFO(current),
-                      COPY_to_guest);
+                      COPY_to_guest | COPY_linear);
 }
 
 unsigned long raw_copy_from_guest(void *to, const void __user *from, unsigned len)
 {
     return copy_guest(to, (vaddr_t)from, len, GVA_INFO(current),
-                      COPY_from_guest);
+                      COPY_from_guest | COPY_linear);
 }
 
-/*
- * Temporarily map one physical guest page and copy data to or from it.
- * The data to be copied cannot cross a page boundary.
- */
 int access_guest_memory_by_ipa(struct domain *d, paddr_t gpa, void *buf,
                                uint32_t size, bool is_write)
 {
-    struct page_info *page;
-    uint64_t offset = gpa & ~PAGE_MASK;  /* Offset within the mapped page */
-    p2m_type_t p2mt;
-    void *p;
-
-    /* Do not cross a page boundary. */
-    if ( size > (PAGE_SIZE - offset) )
-    {
-        printk(XENLOG_G_ERR "d%d: guestcopy: memory access crosses page boundary.\n",
-               d->domain_id);
-        return -EINVAL;
-    }
-
-    page = get_page_from_gfn(d, paddr_to_pfn(gpa), &p2mt, P2M_ALLOC);
-    if ( !page )
-    {
-        printk(XENLOG_G_ERR "d%d: guestcopy: failed to get table entry.\n",
-               d->domain_id);
-        return -EINVAL;
-    }
+    unsigned long left;
+    int flags = COPY_ipa;
 
-    if ( !p2m_is_ram(p2mt) )
-    {
-        put_page(page);
-        printk(XENLOG_G_ERR "d%d: guestcopy: guest memory should be RAM.\n",
-               d->domain_id);
-        return -EINVAL;
-    }
-
-    p = __map_domain_page(page);
+    flags |= is_write ? COPY_to_guest : COPY_from_guest;
 
-    if ( is_write )
-        memcpy(p + offset, buf, size);
-    else
-        memcpy(buf, p + offset, size);
+    left = copy_guest(buf, gpa, size, GPA_INFO(d), flags);
 
-    unmap_domain_page(p);
-    put_page(page);
-
-    return 0;
+    return (!left) ? 0 : -EINVAL;
 }
 
 /*
-- 
2.11.0


Thread overview: 24+ messages
2017-12-12 19:01 [v2 00/16] xen/arm: Stage-2 handling cleanup Julien Grall
2017-12-12 19:01 ` [v2 01/16] xen/arm: raw_copy_to_guest_helper: Rename flush_dcache to flags Julien Grall
2017-12-12 19:01 ` [v2 02/16] xen/arm: raw_copy_to_guest_helper: Rework the prototype and rename it Julien Grall
2017-12-12 19:51   ` Stefano Stabellini
2017-12-12 19:01 ` [v2 03/16] xen/arm: Extend copy_to_guest to support copying from guest VA and use it Julien Grall
2017-12-12 19:54   ` Stefano Stabellini
2017-12-12 19:02 ` [v2 04/16] xen/arm: Extend copy_to_guest to support zeroing " Julien Grall
2017-12-12 19:57   ` Stefano Stabellini
2017-12-12 19:02 ` [v2 05/16] xen/arm: guest_copy: Extend the prototype to pass the vCPU Julien Grall
2017-12-12 20:00   ` Stefano Stabellini
2017-12-12 19:02 ` Julien Grall [this message]
2017-12-12 20:06   ` [v2 06/16] xen/arm: Extend copy_to_guest to support copying from/to guest physical address Stefano Stabellini
2017-12-12 19:02 ` [v2 07/16] xen/arm: Introduce copy_to_guest_phys_flush_dcache Julien Grall
2017-12-12 20:10   ` Stefano Stabellini
2017-12-12 19:02 ` [v2 08/16] xen/arm: kernel: Rework kernel_zimage_load to use the generic copy helper Julien Grall
2017-12-12 19:02 ` [v2 09/16] xen/arm: domain_build: Rework initrd_load " Julien Grall
2017-12-12 19:02 ` [v2 10/16] xen/arm: domain_build: Use copy_to_guest_phys_flush_dcache in dtb_load Julien Grall
2017-12-12 19:02 ` [v2 11/16] xen/arm: p2m: Rename p2m_flush_tlb and p2m_flush_tlb_sync Julien Grall
2017-12-12 19:02 ` [v2 12/16] xen/arm: p2m: Introduce p2m_tlb_flush_sync, export it and use it Julien Grall
2017-12-12 19:02 ` [v2 13/16] xen/arm: p2m: Fold p2m_tlb_flush into p2m_force_tlb_flush_sync Julien Grall
2017-12-12 19:02 ` [v2 14/16] xen/arm: traps: Remove the field gva from mmio_info_t Julien Grall
2017-12-12 19:02 ` [v2 15/16] xen/arm: traps: Move the definition of mmio_info_t in try_handle_mmio Julien Grall
2017-12-12 19:02 ` [v2 16/16] xen/arm: traps: Merge do_trap_instr_abort_guest and do_trap_data_abort_guest Julien Grall
2017-12-12 20:11   ` Stefano Stabellini
