linux-kernel.vger.kernel.org archive mirror
* [PATCH v2] xen: support priv-mapping in an HVM tools domain
@ 2017-11-01 11:31 Paul Durrant
  2017-11-01 13:39 ` Juergen Gross
  0 siblings, 1 reply; 7+ messages in thread
From: Paul Durrant @ 2017-11-01 11:31 UTC (permalink / raw)
  To: x86, xen-devel, linux-kernel
  Cc: Paul Durrant, Boris Ostrovsky, Juergen Gross, Thomas Gleixner,
	Ingo Molnar, H. Peter Anvin

If the domain has XENFEAT_auto_translated_physmap then use of the PV-
specific HYPERVISOR_mmu_update hypercall is clearly incorrect.

This patch adds checks in xen_remap_domain_gfn_array() and
xen_unmap_domain_gfn_range() which call through to the appropriate
xlate_mmu function if the feature is present.

This patch also moves xen_remap_domain_gfn_range() into the PV-only MMU
code and #ifdefs the (only) calling code in privcmd accordingly.
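
For illustration only (not part of the patch): a condensed sketch of the
dispatch that xen_remap_domain_gfn_array() gains, using only the names
visible in the diff; the BUG_ON() on err_ptr is omitted here, and the
authoritative change is the hunk against arch/x86/xen/mmu.c below.

  /* Sketch, not the patch text; declarations come from <xen/features.h>
   * and <xen/xen-ops.h>. */
  int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
                                 unsigned long addr,
                                 xen_pfn_t *gfn, int nr,
                                 int *err_ptr, pgprot_t prot,
                                 unsigned domid, struct page **pages)
  {
          /* Auto-translated (HVM/PVH) domains must not issue the PV
           * HYPERVISOR_mmu_update hypercall; hand off to xlate_mmu. */
          if (xen_feature(XENFEAT_auto_translated_physmap))
                  return xen_xlate_remap_gfn_array(vma, addr, gfn, nr,
                                                   err_ptr, prot, domid,
                                                   pages);

          /* PV domains map via page-table updates (HYPERVISOR_mmu_update). */
          return xen_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid,
                               pages);
  }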

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
---
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
---
 arch/x86/xen/mmu.c    | 36 +++++++++++++++++-------------------
 arch/x86/xen/mmu_pv.c | 11 +++++++++++
 drivers/xen/privcmd.c | 17 +++++++++++++----
 include/xen/xen-ops.h |  7 +++++++
 4 files changed, 48 insertions(+), 23 deletions(-)

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 3e15345abfe7..01837c36e293 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -91,12 +91,12 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
 	return 0;
 }
 
-static int do_remap_gfn(struct vm_area_struct *vma,
-			unsigned long addr,
-			xen_pfn_t *gfn, int nr,
-			int *err_ptr, pgprot_t prot,
-			unsigned domid,
-			struct page **pages)
+int xen_remap_gfn(struct vm_area_struct *vma,
+		  unsigned long addr,
+		  xen_pfn_t *gfn, int nr,
+		  int *err_ptr, pgprot_t prot,
+		  unsigned int domid,
+		  struct page **pages)
 {
 	int err = 0;
 	struct remap_data rmd;
@@ -166,36 +166,34 @@ static int do_remap_gfn(struct vm_area_struct *vma,
 	return err < 0 ? err : mapped;
 }
 
-int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
-			       unsigned long addr,
-			       xen_pfn_t gfn, int nr,
-			       pgprot_t prot, unsigned domid,
-			       struct page **pages)
-{
-	return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
-}
-EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
-
 int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
 			       unsigned long addr,
 			       xen_pfn_t *gfn, int nr,
 			       int *err_ptr, pgprot_t prot,
 			       unsigned domid, struct page **pages)
 {
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
+						 prot, domid, pages);
+
 	/* We BUG_ON because it's a programmer error to pass a NULL err_ptr,
 	 * and the consequences later is quite hard to detect what the actual
 	 * cause of "wrong memory was mapped in".
 	 */
 	BUG_ON(err_ptr == NULL);
-	return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
+	return xen_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid,
+			     pages);
 }
 EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
 
 /* Returns: 0 success */
 int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
-			       int numpgs, struct page **pages)
+			       int nr, struct page **pages)
 {
-	if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return xen_xlate_unmap_gfn_range(vma, nr, pages);
+
+	if (!pages)
 		return 0;
 
 	return -EINVAL;
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 71495f1a86d7..4974d8a6c2b4 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -2670,3 +2670,14 @@ phys_addr_t paddr_vmcoreinfo_note(void)
 		return __pa(vmcoreinfo_note);
 }
 #endif /* CONFIG_KEXEC_CORE */
+
+int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
+			       unsigned long addr,
+			       xen_pfn_t gfn, int nr,
+			       pgprot_t prot, unsigned int domid,
+			       struct page **pages)
+{
+	return xen_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid,
+			     pages);
+}
+EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index feca75b07fdd..b58a1719b606 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -215,6 +215,8 @@ static int traverse_pages_block(unsigned nelem, size_t size,
 	return ret;
 }
 
+#ifdef CONFIG_XEN_PV
+
 struct mmap_gfn_state {
 	unsigned long va;
 	struct vm_area_struct *vma;
@@ -261,10 +263,6 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
 	LIST_HEAD(pagelist);
 	struct mmap_gfn_state state;
 
-	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return -ENOSYS;
-
 	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
 		return -EFAULT;
 
@@ -312,6 +310,17 @@ static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
 	return rc;
 }
 
+#else
+
+static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
+{
+	/* We only support privcmd_ioctl_mmap for PV. */
+	return -ENOSYS;
+}
+
+
+#endif /* CONFIG_XEN_PV */
+
 struct mmap_batch_state {
 	domid_t domain;
 	unsigned long va;
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 218e6aae5433..663a9a06b762 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -60,6 +60,10 @@ static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
 
 struct vm_area_struct;
 
+int xen_remap_gfn(struct vm_area_struct *vma, unsigned long addr,
+		  xen_pfn_t *gfn, int nr, int *err_ptr, pgprot_t prot,
+		  unsigned int domid, struct page **pages);
+
 /*
  * xen_remap_domain_gfn_array() - map an array of foreign frames
  * @vma:     VMA to map the pages into
@@ -84,6 +88,7 @@ int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
 			       unsigned domid,
 			       struct page **pages);
 
+#ifdef CONFIG_XEN_PV
 /* xen_remap_domain_gfn_range() - map a range of foreign frames
  * @vma:     VMA to map the pages into
  * @addr:    Address at which to map the pages
@@ -101,6 +106,8 @@ int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
 			       xen_pfn_t gfn, int nr,
 			       pgprot_t prot, unsigned domid,
 			       struct page **pages);
+#endif
+
 int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
 			       int numpgs, struct page **pages);
 int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
-- 
2.11.0



Thread overview: 7+ messages
2017-11-01 11:31 [PATCH v2] xen: support priv-mapping in an HVM tools domain Paul Durrant
2017-11-01 13:39 ` Juergen Gross
2017-11-01 13:45   ` Paul Durrant
2017-11-01 15:37     ` Juergen Gross
2017-11-01 18:18       ` Boris Ostrovsky
2017-11-02  9:30         ` Paul Durrant
2017-11-02 20:59           ` Boris Ostrovsky
